From 959659c3fa373f31f741ff39a884cc3e4dc7211b Mon Sep 17 00:00:00 2001
From: Derek McGowan
Date: Wed, 18 Jan 2017 11:07:35 -0800
Subject: [PATCH 001/276] Update release checklist with further details

Add details to the release checklist covering where to put the
changelog update and which branch to cherry-pick from. Also add more
details around creating the tag and verifying that the tag is correct.

Signed-off-by: Derek McGowan (github: dmcgowan)
---
 RELEASE-CHECKLIST.md | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/RELEASE-CHECKLIST.md b/RELEASE-CHECKLIST.md
index 49235cec..73eba5a8 100644
--- a/RELEASE-CHECKLIST.md
+++ b/RELEASE-CHECKLIST.md
@@ -1,19 +1,27 @@
 ## Registry Release Checklist
 
-10. Compile release notes detailing features and since the last release. Update the `CHANGELOG.md` file.
+10. Compile release notes detailing features and changes since the last release.
+
+   Update the `CHANGELOG.md` file and create a PR to master with the updates.
+Once that PR has been approved by maintainers the change may be cherry-picked
+to the release branch (new release branches may be forked from this commit).
 
 20. Update the version file: `https://github.com/docker/distribution/blob/master/version/version.go`
 
 30. Update the `MAINTAINERS` (if necessary), `AUTHORS` and `.mailmap` files.
 
-    ```
+```
 make AUTHORS
 ```
 
 40. Create a signed tag.
 
-   Distribution uses semantic versioning. Tags are of the format `vx.y.z[-rcn]`
-You will need PGP installed and a PGP key which has been added to your Github account. The comment for the tag should include the release notes.
+   Distribution uses semantic versioning. Tags are of the format
+`vx.y.z[-rcn]`. You will need PGP installed and a PGP key which has been added
+to your Github account. The comment for the tag should include the release
+notes; use previous tags as a guide for consistent formatting. Run
+`git tag -s vx.y.z[-rcn]` to create the tag and `git tag -v vx.y.z[-rcn]` to
+verify it, checking the comment and the commit hash.
 
 50. Push the signed tag

From beabc206e1f1ccca9981524d9edc3fe1aa782d48 Mon Sep 17 00:00:00 2001
From: Derek McGowan
Date: Mon, 23 Jan 2017 17:27:54 -0800
Subject: [PATCH 002/276] Update tests to use go1.8 rc2

Tests only, do not merge into master

Signed-off-by: Derek McGowan (github: dmcgowan)
---
 Dockerfile | 2 +-
 circle.yml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index 426954a1..fe00241a 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,4 +1,4 @@
-FROM golang:1.7-alpine
+FROM golang:1.8rc2-alpine
 
 ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution
 ENV DOCKER_BUILDTAGS include_oss include_gcs

diff --git a/circle.yml b/circle.yml
index 76276763..52dee234 100644
--- a/circle.yml
+++ b/circle.yml
@@ -8,7 +8,7 @@ machine:
 
   post:
   # go
-    - gvm install go1.7 --prefer-binary --name=stable
+    - gvm install go1.8rc2 --prefer-binary --name=stable
 
   environment:
     # Convenient shortcuts to "common" locations

From 245ca4659e09e9745f3cc1217bf56e946509220c Mon Sep 17 00:00:00 2001
From: Aaron Lehmann
Date: Thu, 26 Jan 2017 11:46:00 -0800
Subject: [PATCH 003/276] reference: Replace EnsureTagged with TagNameOnly

The common use case for this function is to add a default tag if the
reference only has a name. The current behavior only adds the default
tag if there is no *tag*, which requires most callers to check for a
digest. Change the behavior to only add default tags to name-only
references, and change the name to reflect this.
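
A usage sketch of the new behavior (illustrative only; it assumes the
package's ParseNormalizedNamed helper rather than being part of this
patch):

    named, _ := reference.ParseNormalizedNamed("ubuntu")
    reference.TagNameOnly(named) // name only: the default tag "latest" is added

    tagged, _ := reference.ParseNormalizedNamed("ubuntu:16.04")
    reference.TagNameOnly(tagged) // already tagged: returned unchanged

    digested, _ := reference.ParseNormalizedNamed("ubuntu@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
    reference.TagNameOnly(digested) // digested: returned unchanged, no tag added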
The documentation already described the new behavior, so it does not
need to be changed.

Signed-off-by: Aaron Lehmann
---
 reference/normalize.go | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/reference/normalize.go b/reference/normalize.go
index 67f48aa8..2d71fc5e 100644
--- a/reference/normalize.go
+++ b/reference/normalize.go
@@ -123,11 +123,10 @@ func (c canonicalReference) Familiar() Named {
 	}
 }
 
-// EnsureTagged adds the default tag "latest" to a reference if it only has
+// TagNameOnly adds the default tag "latest" to a reference if it only has
 // a repo name.
-func EnsureTagged(ref Named) NamedTagged {
-	namedTagged, ok := ref.(NamedTagged)
-	if !ok {
+func TagNameOnly(ref Named) Named {
+	if IsNameOnly(ref) {
 		namedTagged, err := WithTag(ref, defaultTag)
 		if err != nil {
 			// Default tag must be valid, to create a NamedTagged
@@ -137,7 +136,7 @@ func EnsureTagged(ref Named) NamedTagged {
 		}
 		return namedTagged
 	}
-	return namedTagged
+	return ref
 }
 
 // ParseAnyReference parses a reference string as a possible identifier,

From 7f565ed65a79dca47243efe0135ddc3d8791195a Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Fri, 27 Jan 2017 14:47:45 -0800
Subject: [PATCH 004/276] registry/storage: clean up zero-length blob comments

A previous inspection of the code surrounding zero-length blobs led to
some interesting questions. On closer inspection, it was found that the
hash was indeed for the empty string (""), and not an empty tar, so the
code was correct. The variable naming and comments have been updated
accordingly.

Signed-off-by: Stephen J Day
---
 registry/storage/blob_test.go  |  2 +-
 registry/storage/blobwriter.go | 10 +++++-----
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/registry/storage/blob_test.go b/registry/storage/blob_test.go
index e3e226b9..a263dd6c 100644
--- a/registry/storage/blob_test.go
+++ b/registry/storage/blob_test.go
@@ -530,7 +530,7 @@ func TestLayerUploadZeroLength(t *testing.T) {
 	}
 
 	bs := repository.Blobs(ctx)
-	simpleUpload(t, bs, []byte{}, digestSha256EmptyTar)
+	simpleUpload(t, bs, []byte{}, digestSha256Empty)
 }
 
 func simpleUpload(t *testing.T, bs distribution.BlobIngester, blob []byte, expectedDigest digest.Digest) {

diff --git a/registry/storage/blobwriter.go b/registry/storage/blobwriter.go
index 0c598e7a..d51e27ad 100644
--- a/registry/storage/blobwriter.go
+++ b/registry/storage/blobwriter.go
@@ -19,8 +19,8 @@ var (
 )
 
 const (
-	// DigestSha256EmptyTar is the canonical sha256 digest of empty data
-	digestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+	// digestSha256Empty is the canonical sha256 digest of empty data
+	digestSha256Empty = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
)
 
 // blobWriter is used to control the various aspects of resumable
@@ -314,7 +314,7 @@ func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor
 	// If no data was received, we may not actually have a file on disk. Check
 	// the size here and write a zero-length file to blobPath if this is the
 	// case. For the most part, this should only ever happen with zero-length
-	// tars.
+	// blobs.
 	if _, err := bw.blobStore.driver.Stat(ctx, bw.path); err != nil {
 		switch err := err.(type) {
 		case storagedriver.PathNotFoundError:
@@ -322,8 +322,8 @@ func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor
 			// get a hash, then the underlying file is deleted, we risk moving
 			// a zero-length blob into a nonzero-length blob location. 
To
			// prevent this horrid thing, we employ the hack of only allowing
-			// to this happen for the digest of an empty tar.
-			if desc.Digest == digestSha256EmptyTar {
+			// this to happen for the digest of an empty blob.
+			if desc.Digest == digestSha256Empty {
 				return bw.blobStore.driver.PutContent(ctx, blobPath, []byte{})
 			}

From 2d20471cd7aad78dcfd1b7d26cb2fb6e7c7efc11 Mon Sep 17 00:00:00 2001
From: Eric Yang
Date: Tue, 31 Jan 2017 01:18:22 +0800
Subject: [PATCH 005/276] fixing acronyms case

Signed-off-by: Eric Yang
---
 registry/storage/purgeuploads.go | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/registry/storage/purgeuploads.go b/registry/storage/purgeuploads.go
index 7576b189..925b1ae9 100644
--- a/registry/storage/purgeuploads.go
+++ b/registry/storage/purgeuploads.go
@@ -80,7 +80,7 @@ func getOutstandingUploads(ctx context.Context, driver storageDriver.StorageDriv
 		}
 
-		uuid, isContainingDir := uUIDFromPath(filePath)
+		uuid, isContainingDir := uuidFromPath(filePath)
 		if uuid == "" {
 			// Cannot reliably delete
 			return nil
@@ -111,10 +111,10 @@ func getOutstandingUploads(ctx context.Context, driver storageDriver.StorageDriv
 	return uploads, errors
 }
 
-// uUIDFromPath extracts the upload UUID from a given path
+// uuidFromPath extracts the upload UUID from a given path
 // If the UUID is the last path component, this is the containing
 // directory for all upload files
-func uUIDFromPath(path string) (string, bool) {
+func uuidFromPath(path string) (string, bool) {
 	components := strings.Split(path, "/")
 	for i := len(components) - 1; i >= 0; i-- {
 		if u, err := uuid.Parse(components[i]); err == nil {

From 95daa793b83a21656fe6c13e6d5cf1c3999108c7 Mon Sep 17 00:00:00 2001
From: Yong Tang
Date: Tue, 7 Feb 2017 17:43:28 -0800
Subject: [PATCH 006/276] Expose `DomainRegexp` from reference

This fix is based on:
https://github.com/docker/docker/pull/30746#discussion_r99650885

The goal is to reuse the `DomainRegexp` in docker to check for the
`:` pattern.

Signed-off-by: Yong Tang
---
 reference/regexp.go      | 10 +++++-----
 reference/regexp_test.go |  2 +-
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/reference/regexp.go b/reference/regexp.go
index 405e995d..78603493 100644
--- a/reference/regexp.go
+++ b/reference/regexp.go
@@ -20,15 +20,15 @@ var (
 		optional(repeated(separatorRegexp, alphaNumericRegexp)))
 
 	// domainComponentRegexp restricts the registry domain component of a
-	// repository name to start with a component as defined by domainRegexp
+	// repository name to start with a component as defined by DomainRegexp
 	// and followed by an optional port.
 	domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)
 
-	// domainRegexp defines the structure of potential domain components
+	// DomainRegexp defines the structure of potential domain components
 	// that may be part of image names. This is purposely a subset of what is
 	// allowed by DNS to ensure backwards compatibility with Docker image
 	// names.
-	domainRegexp = expression(
+	DomainRegexp = expression(
 		domainComponentRegexp,
 		optional(repeated(literal(`.`), domainComponentRegexp)),
 		optional(literal(`:`), match(`[0-9]+`)))
@@ -51,14 +51,14 @@ var (
 	// regexp has capturing groups for the domain and name part omitting
 	// the separating forward slash from either. 
NameRegexp = expression( - optional(domainRegexp, literal(`/`)), + optional(DomainRegexp, literal(`/`)), nameComponentRegexp, optional(repeated(literal(`/`), nameComponentRegexp))) // anchoredNameRegexp is used to parse a name value, capturing the // domain and trailing components. anchoredNameRegexp = anchored( - optional(capture(domainRegexp), literal(`/`)), + optional(capture(DomainRegexp), literal(`/`)), capture(nameComponentRegexp, optional(repeated(literal(`/`), nameComponentRegexp)))) diff --git a/reference/regexp_test.go b/reference/regexp_test.go index c2126399..09bc8192 100644 --- a/reference/regexp_test.go +++ b/reference/regexp_test.go @@ -116,7 +116,7 @@ func TestDomainRegexp(t *testing.T) { match: true, }, } - r := regexp.MustCompile(`^` + domainRegexp.String() + `$`) + r := regexp.MustCompile(`^` + DomainRegexp.String() + `$`) for i := range hostcases { checkRegexp(t, r, hostcases[i]) } From 20036597bf540d81c518e8a2387a1bbc5ad3ce30 Mon Sep 17 00:00:00 2001 From: kaiwentan Date: Sat, 21 Jan 2017 00:05:33 +0800 Subject: [PATCH 007/276] fix some misspells Signed-off-by: kaiwentan --- configuration/configuration.go | 2 +- context/doc.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/configuration/configuration.go b/configuration/configuration.go index d72fe7d1..4cbe8a01 100644 --- a/configuration/configuration.go +++ b/configuration/configuration.go @@ -329,7 +329,7 @@ type Health struct { type v0_1Configuration Configuration // UnmarshalYAML implements the yaml.Unmarshaler interface -// Unmarshals a string of the form X.Y into a Version, validating that X and Y can represent uints +// Unmarshals a string of the form X.Y into a Version, validating that X and Y can represent unsigned integers func (version *Version) UnmarshalYAML(unmarshal func(interface{}) error) error { var versionString string err := unmarshal(&versionString) diff --git a/context/doc.go b/context/doc.go index 3b4ab888..9b623074 100644 --- a/context/doc.go +++ b/context/doc.go @@ -64,7 +64,7 @@ // Note that this only affects the new context, the previous context, with the // version field, can be used independently. Put another way, the new logger, // added to the request context, is unique to that context and can have -// request scoped varaibles. +// request scoped variables. 
//
 // HTTP Requests
 //

From 75c2e524a1bbfd6eb678b0db7a3b3178e73b756a Mon Sep 17 00:00:00 2001
From: uhayate
Date: Mon, 13 Feb 2017 17:20:07 +0800
Subject: [PATCH 008/276] refactor the code style in distribution/registry/storage/driver/s3-goamz/s3.go

Signed-off-by: uhayate
---
 registry/storage/driver/s3-goamz/s3.go | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/registry/storage/driver/s3-goamz/s3.go b/registry/storage/driver/s3-goamz/s3.go
index 33751c16..4af86765 100644
--- a/registry/storage/driver/s3-goamz/s3.go
+++ b/registry/storage/driver/s3-goamz/s3.go
@@ -443,14 +443,14 @@ func (d *driver) List(ctx context.Context, opath string) ([]string, error) {
 			directories = append(directories, strings.Replace(commonPrefix[0:len(commonPrefix)-1], d.s3Path(""), prefix, 1))
 		}
 
-		if listResponse.IsTruncated {
-			listResponse, err = d.Bucket.List(d.s3Path(path), "/", listResponse.NextMarker, listMax)
-			if err != nil {
-				return nil, err
-			}
-		} else {
+		if !listResponse.IsTruncated {
 			break
 		}
+
+		listResponse, err = d.Bucket.List(d.s3Path(path), "/", listResponse.NextMarker, listMax)
+		if err != nil {
+			return nil, err
+		}
 	}
 
 	if opath != "/" {

From 72bdf0e320fb9af437a361c75e5b24acd233cd1b Mon Sep 17 00:00:00 2001
From: sakeven
Date: Tue, 7 Feb 2017 17:16:16 +0800
Subject: [PATCH 009/276] check whether v4 auth must be used in a specific aws region

Signed-off-by: sakeven
---
 registry/storage/driver/s3-goamz/s3.go | 17 +++++++++++++----
 1 file changed, 13 insertions(+), 4 deletions(-)

diff --git a/registry/storage/driver/s3-goamz/s3.go b/registry/storage/driver/s3-goamz/s3.go
index 33751c16..4cfe0192 100644
--- a/registry/storage/driver/s3-goamz/s3.go
+++ b/registry/storage/driver/s3-goamz/s3.go
@@ -266,10 +266,8 @@ func New(params DriverParameters) (*Driver, error) {
 
 	if params.V4Auth {
 		s3obj.Signature = aws.V4Signature
-	} else {
-		if params.Region.Name == "eu-central-1" {
-			return nil, fmt.Errorf("The eu-central-1 region only works with v4 authentication")
-		}
+	} else if mustV4Auth(params.Region.Name) {
+		return nil, fmt.Errorf("The %s region only works with v4 authentication", params.Region.Name)
 	}
 
 	bucket := s3obj.Bucket(params.Bucket)
@@ -573,6 +571,17 @@ func getPermissions() s3.ACL {
 	return s3.Private
 }
 
+// mustV4Auth checks whether v4 auth must be used in the given region.
+// Please see documentation at http://docs.aws.amazon.com/general/latest/gr/signature-version-2.html
+func mustV4Auth(region string) bool {
+	switch region {
+	case "eu-central-1", "cn-north-1", "us-east-2",
+		"ca-central-1", "ap-south-1", "ap-northeast-2", "eu-west-2":
+		return true
+	}
+	return false
+}
+
 func (d *driver) getContentType() string {
 	return "application/octet-stream"
 }

From 9a58c91051e03b46f1461e371a7bf527c1284612 Mon Sep 17 00:00:00 2001
From: Noah Treuhaft
Date: Wed, 8 Feb 2017 11:38:44 -0800
Subject: [PATCH 010/276] notifications: fix expvar for Go 1.7

Remove EndpointConfig.Transport from the return value of the
registry.notifications.endpoints expvar.Func. It results in an empty
value for that expvar variable under Go 1.7 because it is a non-nil
*http.Transport, which Go 1.7 can no longer encode as JSON.
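
A minimal sketch of the failure mode and of the `json:"-"` fix below
(illustrative only, assuming recent encoding/json behavior):

    // a non-nil *http.Transport exposes func-typed fields (e.g. Proxy)
    // that encoding/json refuses to encode
    _, err := json.Marshal(struct{ Transport *http.Transport }{&http.Transport{}})
    // err != nil: the encoder reports an unsupported func type

    type config struct {
        Transport *http.Transport `json:"-"` // skipped by the encoder
    }
    _, err = json.Marshal(config{Transport: &http.Transport{}})
    // err == nil: the tagged field is omitted entirely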
Signed-off-by: Noah Treuhaft
---
 notifications/endpoint.go     |  2 +-
 notifications/metrics_test.go | 28 ++++++++++++++++++++++++++++
 2 files changed, 29 insertions(+), 1 deletion(-)
 create mode 100644 notifications/metrics_test.go

diff --git a/notifications/endpoint.go b/notifications/endpoint.go
index 29a9e27b..44d0f6d7 100644
--- a/notifications/endpoint.go
+++ b/notifications/endpoint.go
@@ -13,7 +13,7 @@ type EndpointConfig struct {
 	Threshold         int
 	Backoff           time.Duration
 	IgnoredMediaTypes []string
-	Transport         *http.Transport
+	Transport         *http.Transport `json:"-"`
 }
 
 // defaults set any zero-valued fields to a reasonable default.

diff --git a/notifications/metrics_test.go b/notifications/metrics_test.go
new file mode 100644
index 00000000..03a08e2c
--- /dev/null
+++ b/notifications/metrics_test.go
@@ -0,0 +1,28 @@
+package notifications
+
+import (
+	"encoding/json"
+	"expvar"
+	"testing"
+)
+
+func TestMetricsExpvar(t *testing.T) {
+	endpointsVar := expvar.Get("registry").(*expvar.Map).Get("notifications").(*expvar.Map).Get("endpoints")
+
+	var v interface{}
+	if err := json.Unmarshal([]byte(endpointsVar.String()), &v); err != nil {
+		t.Fatalf("unexpected error unmarshaling endpoints: %v", err)
+	}
+	if v != nil {
+		t.Fatalf("expected nil, got %#v", v)
+	}
+
+	NewEndpoint("x", "y", EndpointConfig{})
+
+	if err := json.Unmarshal([]byte(endpointsVar.String()), &v); err != nil {
+		t.Fatalf("unexpected error unmarshaling endpoints: %v", err)
+	}
+	if slice, ok := v.([]interface{}); !ok || len(slice) != 1 {
+		t.Logf("expected one-element []interface{}, got %#v", v)
+	}
+}

From 9b1e89375599573733d4b9a0717345f923c1fbc7 Mon Sep 17 00:00:00 2001
From: Anton Tiurin
Date: Fri, 20 Jan 2017 14:20:52 +0300
Subject: [PATCH 011/276] Introduce dynamic plugins

The go1.8 plugin package brings a mechanism for dynamic loading. A
StorageDriver or an AccessController can be compiled as a plugin and
loaded at runtime.

Signed-off-by: Anton Tiurin
---
 Dockerfile                                  |  4 +-
 circle.yml                                  |  2 +-
 configuration/configuration.go              |  3 +
 docs/configuration.md                       | 19 +++++
 registry/auth/auth.go                       | 11 ++-
 registry/handlers/app.go                    | 15 ++--
 registry/pluginloader/loaders_18_test.go    | 80 +++++++++++++++++++
 registry/pluginloader/pluginloader.go       | 54 +++++++++++++
 registry/pluginloader/pluginloader_pre18.go | 13 +++
 registry/pluginloader/suffix.go             |  5 ++
 registry/pluginloader/suffix_darwin.go      |  5 ++
 registry/pluginloader/suffix_windows.go     |  5 ++
 .../pluginloader/testplugin/testplugin.go   |  8 ++
 13 files changed, 214 insertions(+), 10 deletions(-)
 create mode 100644 registry/pluginloader/loaders_18_test.go
 create mode 100644 registry/pluginloader/pluginloader.go
 create mode 100644 registry/pluginloader/pluginloader_pre18.go
 create mode 100644 registry/pluginloader/suffix.go
 create mode 100644 registry/pluginloader/suffix_darwin.go
 create mode 100644 registry/pluginloader/suffix_windows.go
 create mode 100644 registry/pluginloader/testplugin/testplugin.go

diff --git a/Dockerfile b/Dockerfile
index fe00241a..c7efaddf 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,10 +1,10 @@
-FROM golang:1.8rc2-alpine
+FROM golang:1.8-alpine
 
 ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution
 ENV DOCKER_BUILDTAGS include_oss include_gcs
 
 RUN set -ex \
-    && apk add --no-cache make git
+    && apk add --no-cache make git build-base
 
 WORKDIR $DISTRIBUTION_DIR
 COPY . $DISTRIBUTION_DIR

diff --git a/circle.yml b/circle.yml
index 52dee234..045d13db 100644
--- a/circle.yml
+++ b/circle.yml
@@ -8,7 +8,7 @@ machine:
 
   post:
   # go
-    - gvm install go1.8rc2 --prefer-binary --name=stable
+    - gvm install go1.8 --prefer-binary --name=stable
 
   environment:
     # Convenient shortcuts to "common" locations

diff --git a/configuration/configuration.go b/configuration/configuration.go
index d72fe7d1..d04d3d41 100644
--- a/configuration/configuration.go
+++ b/configuration/configuration.go
@@ -48,6 +48,9 @@ type Configuration struct {
 	// deprecated. Please use Log.Level in the future.
 	Loglevel Loglevel `yaml:"loglevel,omitempty"`
 
+	// Plugins is a list of paths where plugins are expected to be found
+	Plugins []string `yaml:"plugins,omitempty"`
+
 	// Storage is the configuration for the registry's storage driver
 	Storage Storage `yaml:"storage"`

diff --git a/docs/configuration.md b/docs/configuration.md
index 805328ea..e4bc9909 100644
--- a/docs/configuration.md
+++ b/docs/configuration.md
@@ -89,6 +89,10 @@ log:
     to:
       - errors@example.com
 loglevel: debug # deprecated: use "log"
+plugins:
+  - /plugins/
+  - /plugin/driver1.so
+  - /plugin/auth1.so
 storage:
   filesystem:
     rootdirectory: /var/lib/registry
@@ -359,6 +363,21 @@ loglevel: debug
 
 Permitted values are `error`, `warn`, `info` and `debug`. The default is `info`.
 
+## `plugins`
+
+```none
+plugins:
+  - /plugins/
+  - /plugin/driver1.so
+  - /plugin/auth1.so
+```
+
+> Requires golang >= 1.8
+
+A list of directories containing plugins, or of paths pointing to individual
+plugins. Plugins are loaded before trying to initialize any driver or access
+controller. If a path is a directory, it is scanned for plugins, loading all
+files with the platform's common shared-library extension (`.so`, `.dylib`,
+`.dll`).
+
 ## `storage`
 
 ```none

diff --git a/registry/auth/auth.go b/registry/auth/auth.go
index 1c9af882..f2db0e92 100644
--- a/registry/auth/auth.go
+++ b/registry/auth/auth.go
@@ -198,5 +198,14 @@ func GetAccessController(name string, options map[string]interface{}) (AccessCon
 		return initFunc(options)
 	}
 
-	return nil, fmt.Errorf("no access controller registered with name: %s", name)
+	return nil, InvalidAccessControllerError{name}
+}
+
+// InvalidAccessControllerError records an attempt to construct an unregistered access controller
+type InvalidAccessControllerError struct {
+	Name string
+}
+
+func (err InvalidAccessControllerError) Error() string {
+	return fmt.Sprintf("no access controller registered with name: %s", err.Name)
 }

diff --git a/registry/handlers/app.go b/registry/handlers/app.go
index 04e346cc..5e4bac7f 100644
--- a/registry/handlers/app.go
+++ b/registry/handlers/app.go
@@ -27,6 +27,7 @@ import (
 	"github.com/docker/distribution/registry/auth"
 	registrymiddleware "github.com/docker/distribution/registry/middleware/registry"
 	repositorymiddleware "github.com/docker/distribution/registry/middleware/repository"
+	"github.com/docker/distribution/registry/pluginloader"
 	"github.com/docker/distribution/registry/proxy"
 	"github.com/docker/distribution/registry/storage"
 	memorycache "github.com/docker/distribution/registry/storage/cache/memory"
@@ -117,9 +118,6 @@ func NewApp(ctx context.Context, config *configuration.Configuration) *App {
 	var err error
 	app.driver, err = factory.Create(config.Storage.Type(), storageParams)
 	if err != nil {
-		// TODO(stevvooe): Move the creation of a service into a protected
-		// method, where this is created lazily. Its status can be queried via
-		// a health check.
panic(err) } @@ -157,6 +155,12 @@ func NewApp(ctx context.Context, config *configuration.Configuration) *App { app.configureRedis(config) app.configureLogHook(config) + if len(config.Plugins) != 0 { + if err = pluginloader.LoadPlugins(app, config.Plugins); err != nil { + ctxu.GetLogger(app).Errorf("could not load plugins from %s: %v", config.Plugins, err) + } + } + options := registrymiddleware.GetRegistryOptions() if config.Compatibility.Schema1.TrustKey != "" { app.trustKey, err = libtrust.LoadKeyFile(config.Compatibility.Schema1.TrustKey) @@ -301,11 +305,10 @@ func NewApp(ctx context.Context, config *configuration.Configuration) *App { authType := config.Auth.Type() if authType != "" { - accessController, err := auth.GetAccessController(config.Auth.Type(), config.Auth.Parameters()) + app.accessController, err = auth.GetAccessController(authType, config.Auth.Parameters()) if err != nil { - panic(fmt.Sprintf("unable to configure authorization (%s): %v", authType, err)) + panic(err) } - app.accessController = accessController ctxu.GetLogger(app).Debugf("configured %q access controller", authType) } diff --git a/registry/pluginloader/loaders_18_test.go b/registry/pluginloader/loaders_18_test.go new file mode 100644 index 00000000..359d6fc0 --- /dev/null +++ b/registry/pluginloader/loaders_18_test.go @@ -0,0 +1,80 @@ +// +build go1.8,!race + +package pluginloader + +import ( + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "testing" + + check "gopkg.in/check.v1" + + ctxu "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/auth" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/factory" + "github.com/docker/distribution/registry/storage/driver/testsuites" +) + +func TestPlugins(t *testing.T) { + tempdir, err := ioutil.TempDir("", "ditributiontestplugins") + if err != nil { + t.Fatalf("can not create tempdirectory %v\n", err) + } + defer os.RemoveAll(tempdir) + + pluginpath := filepath.Join(tempdir, "testplugin"+suffix) + gobinary, err := exec.LookPath("go") + if err != nil { + t.Fatalf("LookPath can not locate go binary: %v", err) + } + t.Logf("compile plugin into %s by %s\n", pluginpath, gobinary) + cmd := exec.Command(gobinary, "build", "-o", pluginpath, "-buildmode", "plugin", "github.com/docker/distribution/registry/pluginloader/testplugin") + if err = cmd.Run(); err != nil { + output, _ := cmd.CombinedOutput() + t.Fatalf("plugin compilation failed %v %s\n", err, output) + } + + t.Run("TestLoadPlugin", func(t *testing.T) { + _, err := factory.Create("inmemory", nil) + if _, ok := err.(factory.InvalidStorageDriverError); !ok { + t.Fatalf("inmemory driver is not expected to be built in: %T\n", err) + } + + _, err = auth.GetAccessController("silly", nil) + if _, ok := err.(auth.InvalidAccessControllerError); !ok { + t.Fatalf("silly plugin is not expected to be built in: %T\n", err) + } + + err = LoadPlugins(ctxu.Background(), []string{pluginpath}) + if err != nil { + t.Fatalf("loading failed %v\n", err) + } + + dr, err := factory.Create("inmemory", nil) + if err != nil { + t.Fatalf("dynamic driver construction failed %v\n", err) + } + if dr == nil { + t.Fatal("driver is not expected to be nil") + } + + ac, err := auth.GetAccessController("silly", map[string]interface{}{"realm": "realm", "service": "dummy"}) + if err != nil { + t.Fatalf("AccessController construction failed %v\n", err) + } + if ac == nil { + t.Fatal("AccessController is not expected to be nil") + } + }) + + 
t.Run("InmemoryDriverTestSuite", func(t *testing.T) { + inmemoryDriverConstructor := func() (storagedriver.StorageDriver, error) { + return factory.Create("inmemory", nil) + } + testsuites.RegisterSuite(inmemoryDriverConstructor, testsuites.NeverSkip) + check.TestingT(t) + }) +} diff --git a/registry/pluginloader/pluginloader.go b/registry/pluginloader/pluginloader.go new file mode 100644 index 00000000..141d26ec --- /dev/null +++ b/registry/pluginloader/pluginloader.go @@ -0,0 +1,54 @@ +// +build go1.8 + +package pluginloader + +import ( + "os" + "path/filepath" + "plugin" + + ctxu "github.com/docker/distribution/context" +) + +// LoadPlugins loads plugins pointed by paths. If a path points to a directory +// this directory is scanned for files with a platform specific shared library suffix (like .so, .dylib, .dll). +// NOTE: Plugins are expected to register themselves the same way as built-ins do. +// Storage drivers should use `factory.Register`, AccessControllers `auth.Register` in init(). +func LoadPlugins(ctx ctxu.Context, paths []string) error { + for _, pluginpath := range paths { + fi, err := os.Stat(pluginpath) + if err != nil { + if os.IsNotExist(err) { + ctxu.GetLogger(ctx).Errorf("plugin file %s does not exist", pluginpath) + } else { + ctxu.GetLogger(ctx).Errorf("could not Stat plugin file %s: %v", pluginpath, err) + } + continue + } + + if !fi.IsDir() { + if err = loadplugin(pluginpath); err != nil { + ctxu.GetLogger(ctx).Errorf("could not load plugin %s: %v", pluginpath, err) + } + continue + } + + // To preserve the order we do not append this plugins to the paths + matches, err := filepath.Glob(filepath.Join(pluginpath, "*"+suffix)) + if err != nil { + return err + } + + for _, pluginpath := range matches { + if err = loadplugin(pluginpath); err != nil { + ctxu.GetLogger(ctx).Errorf("could not load plugin %s: %v", pluginpath, err) + } + } + } + return nil +} + +func loadplugin(path string) error { + _, err := plugin.Open(path) + return err +} diff --git a/registry/pluginloader/pluginloader_pre18.go b/registry/pluginloader/pluginloader_pre18.go new file mode 100644 index 00000000..10d1271c --- /dev/null +++ b/registry/pluginloader/pluginloader_pre18.go @@ -0,0 +1,13 @@ +// +build !go1.8 + +package pluginloader + +import ( + "fmt" + + "github.com/docker/distribution/context" +) + +func LoadPlugins(ctx context.Context, paths []string) error { + return fmt.Errorf("only golang >= 1.8 supports dynamic plugins") +} diff --git a/registry/pluginloader/suffix.go b/registry/pluginloader/suffix.go new file mode 100644 index 00000000..931947bc --- /dev/null +++ b/registry/pluginloader/suffix.go @@ -0,0 +1,5 @@ +// +build !darwin,!windows + +package pluginloader + +const suffix = ".so" diff --git a/registry/pluginloader/suffix_darwin.go b/registry/pluginloader/suffix_darwin.go new file mode 100644 index 00000000..e523bfc1 --- /dev/null +++ b/registry/pluginloader/suffix_darwin.go @@ -0,0 +1,5 @@ +// +build darwin + +package pluginloader + +const suffix = ".dylib" diff --git a/registry/pluginloader/suffix_windows.go b/registry/pluginloader/suffix_windows.go new file mode 100644 index 00000000..7715a2a2 --- /dev/null +++ b/registry/pluginloader/suffix_windows.go @@ -0,0 +1,5 @@ +// +build windows + +package pluginloader + +const suffix = ".dll" diff --git a/registry/pluginloader/testplugin/testplugin.go b/registry/pluginloader/testplugin/testplugin.go new file mode 100644 index 00000000..5f298a2f --- /dev/null +++ b/registry/pluginloader/testplugin/testplugin.go @@ -0,0 +1,8 @@ +package 
main + +import ( + _ "github.com/docker/distribution/registry/auth/silly" + _ "github.com/docker/distribution/registry/storage/driver/inmemory" +) + +func main() {} From b2fb1586cecd8d2d8418dd681d616a029d32d01e Mon Sep 17 00:00:00 2001 From: Anton Tiurin Date: Fri, 17 Feb 2017 03:19:47 +0300 Subject: [PATCH 012/276] circleci: pluginloader is tested w/o coverpkg Signed-off-by: Anton Tiurin --- circle.yml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/circle.yml b/circle.yml index 045d13db..93af09bb 100644 --- a/circle.yml +++ b/circle.yml @@ -72,7 +72,12 @@ test: override: # Test stable, and report - - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/$PACKAGE/coverage.out -coverpkg=$(./coverpkg.sh $PACKAGE $ROOT_PACKAGE) $PACKAGE': + - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | grep -v pluginloader | xargs -L 1 -I{} bash -c 'export PACKAGE={}; go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/$PACKAGE/coverage.out -coverpkg=$(./coverpkg.sh $PACKAGE $ROOT_PACKAGE) $PACKAGE': + timeout: 1000 + pwd: $BASE_STABLE + + # coverpkg changes the signature of compiled packages + - gvm use stable; export PACKAGE=github.com/docker/distribution/registry/pluginloader; go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/$PACKAGE/coverage.out $PACKAGE: timeout: 1000 pwd: $BASE_STABLE From 6a8e2ca84f0dd54f0e35d842d40e828de495baef Mon Sep 17 00:00:00 2001 From: fate-grand-order Date: Thu, 16 Feb 2017 10:57:21 +0800 Subject: [PATCH 013/276] Use errors.New() to output the error message and fix some typos Signed-off-by: fate-grand-order --- blobs.go | 2 +- cmd/digest/main.go | 2 +- configuration/configuration.go | 7 ++++--- errors.go | 2 +- 4 files changed, 7 insertions(+), 6 deletions(-) diff --git a/blobs.go b/blobs.go index 79c5fb33..01d30902 100644 --- a/blobs.go +++ b/blobs.go @@ -152,7 +152,7 @@ type BlobProvider interface { // BlobServer can serve blobs via http. type BlobServer interface { - // ServeBlob attempts to serve the blob, identifed by dgst, via http. The + // ServeBlob attempts to serve the blob, identified by dgst, via http. The // service may decide to redirect the client elsewhere or serve the data // directly. // diff --git a/cmd/digest/main.go b/cmd/digest/main.go index 9582d197..20f64ddb 100644 --- a/cmd/digest/main.go +++ b/cmd/digest/main.go @@ -32,7 +32,7 @@ func init() { func usage() { fmt.Fprintf(os.Stderr, "usage: %s [files...]\n", os.Args[0]) - fmt.Fprintf(os.Stderr, ` + fmt.Fprint(os.Stderr, ` Calculate the digest of one or more input files, emitting the result to standard out. If no files are provided, the digest of stdin will be calculated. diff --git a/configuration/configuration.go b/configuration/configuration.go index d72fe7d1..8931557e 100644 --- a/configuration/configuration.go +++ b/configuration/configuration.go @@ -1,6 +1,7 @@ package configuration import ( + "errors" "fmt" "io" "io/ioutil" @@ -132,7 +133,7 @@ type Configuration struct { // HTTP2 configuration options HTTP2 struct { - // Specifies wether the registry should disallow clients attempting + // Specifies whether the registry should disallow clients attempting // to connect via http2. If set to true, only http/1.1 is supported. 
Disabled bool `yaml:"disabled,omitempty"` } `yaml:"http2,omitempty"` @@ -235,7 +236,7 @@ type LogHook struct { // Levels set which levels of log message will let hook executed. Levels []string `yaml:"levels,omitempty"` - // MailOptions allows user to configurate email parameters. + // MailOptions allows user to configure email parameters. MailOptions MailOptions `yaml:"options,omitempty"` } @@ -627,7 +628,7 @@ func Parse(rd io.Reader) (*Configuration, error) { v0_1.Loglevel = Loglevel("info") } if v0_1.Storage.Type() == "" { - return nil, fmt.Errorf("No storage configuration provided") + return nil, errors.New("No storage configuration provided") } return (*Configuration)(v0_1), nil } diff --git a/errors.go b/errors.go index 2062a06f..020d3325 100644 --- a/errors.go +++ b/errors.go @@ -77,7 +77,7 @@ func (err ErrManifestUnknownRevision) Error() string { type ErrManifestUnverified struct{} func (ErrManifestUnverified) Error() string { - return fmt.Sprintf("unverified manifest") + return "unverified manifest" } // ErrManifestVerification provides a type to collect errors encountered From 3161f9d1fdac8e43a73c4522d26f1db068b74d1c Mon Sep 17 00:00:00 2001 From: fate-grand-order Date: Mon, 27 Feb 2017 14:06:18 +0800 Subject: [PATCH 014/276] fix typos in comment Signed-off-by: fate-grand-order --- context/doc.go | 2 +- health/health_test.go | 4 ++-- manifest/manifestlist/manifestlist.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/context/doc.go b/context/doc.go index 3b4ab888..9b623074 100644 --- a/context/doc.go +++ b/context/doc.go @@ -64,7 +64,7 @@ // Note that this only affects the new context, the previous context, with the // version field, can be used independently. Put another way, the new logger, // added to the request context, is unique to that context and can have -// request scoped varaibles. +// request scoped variables. // // HTTP Requests // diff --git a/health/health_test.go b/health/health_test.go index 766fe159..8d1a028b 100644 --- a/health/health_test.go +++ b/health/health_test.go @@ -25,8 +25,8 @@ func TestReturns200IfThereAreNoChecks(t *testing.T) { } } -// TestReturns500IfThereAreErrorChecks ensures that the result code of the -// health endpoint is 500 if there are health checks with errors +// TestReturns503IfThereAreErrorChecks ensures that the result code of the +// health endpoint is 503 if there are health checks with errors. func TestReturns503IfThereAreErrorChecks(t *testing.T) { recorder := httptest.NewRecorder() diff --git a/manifest/manifestlist/manifestlist.go b/manifest/manifestlist/manifestlist.go index 7a8cabbd..3aa0662d 100644 --- a/manifest/manifestlist/manifestlist.go +++ b/manifest/manifestlist/manifestlist.go @@ -81,7 +81,7 @@ type ManifestList struct { Manifests []ManifestDescriptor `json:"manifests"` } -// References returnes the distribution descriptors for the referenced image +// References returns the distribution descriptors for the referenced image // manifests. func (m ManifestList) References() []distribution.Descriptor { dependencies := make([]distribution.Descriptor, len(m.Manifests)) From 0810eba2adf048b77621472991211924d9ec31c5 Mon Sep 17 00:00:00 2001 From: Christy Perez Date: Wed, 1 Mar 2017 12:24:08 -0600 Subject: [PATCH 015/276] Better error message for BuildManifestURL if not tagged or digested Since there's no default case, if there's not a tag or digest you get back a confusing error from the router about it not matching the expected pattern. 
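
A hypothetical call showing the improved failure (a sketch using the
package's NewURLBuilderFromString constructor and reference.WithName):

    builder, _ := v2.NewURLBuilderFromString("http://example.com", false)
    ref, _ := reference.WithName("foo/bar")
    _, err := builder.BuildManifestURL(ref)
    // err: "reference must have a tag or digest", rather than an opaque
    // router error about the route pattern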
Also redoing the tests for URLs a bit so that they can handle checking for failures. Signed-off-by: Christy Perez --- registry/api/v2/urls.go | 3 ++ registry/api/v2/urls_test.go | 54 ++++++++++++++++++++++++++++++------ 2 files changed, 48 insertions(+), 9 deletions(-) diff --git a/registry/api/v2/urls.go b/registry/api/v2/urls.go index e2e242ea..3eb3e09d 100644 --- a/registry/api/v2/urls.go +++ b/registry/api/v2/urls.go @@ -1,6 +1,7 @@ package v2 import ( + "fmt" "net" "net/http" "net/url" @@ -175,6 +176,8 @@ func (ub *URLBuilder) BuildManifestURL(ref reference.Named) (string, error) { tagOrDigest = v.Tag() case reference.Digested: tagOrDigest = v.Digest().String() + default: + return "", fmt.Errorf("reference must have a tag or digest") } manifestURL, err := route.URL("name", ref.Name(), "reference", tagOrDigest) diff --git a/registry/api/v2/urls_test.go b/registry/api/v2/urls_test.go index c5da0062..6449c6e4 100644 --- a/registry/api/v2/urls_test.go +++ b/registry/api/v2/urls_test.go @@ -1,8 +1,10 @@ package v2 import ( + "fmt" "net/http" "net/url" + "reflect" "testing" "github.com/docker/distribution/reference" @@ -11,6 +13,7 @@ import ( type urlBuilderTestCase struct { description string expectedPath string + expectedErr error build func() (string, error) } @@ -20,26 +23,38 @@ func makeURLBuilderTestCases(urlBuilder *URLBuilder) []urlBuilderTestCase { { description: "test base url", expectedPath: "/v2/", + expectedErr: nil, build: urlBuilder.BuildBaseURL, }, { description: "test tags url", expectedPath: "/v2/foo/bar/tags/list", + expectedErr: nil, build: func() (string, error) { return urlBuilder.BuildTagsURL(fooBarRef) }, }, { - description: "test manifest url", + description: "test manifest url tagged ref", expectedPath: "/v2/foo/bar/manifests/tag", + expectedErr: nil, build: func() (string, error) { ref, _ := reference.WithTag(fooBarRef, "tag") return urlBuilder.BuildManifestURL(ref) }, }, + { + description: "test manifest url bare ref", + expectedPath: "", + expectedErr: fmt.Errorf("reference must have a tag or digest"), + build: func() (string, error) { + return urlBuilder.BuildManifestURL(fooBarRef) + }, + }, { description: "build blob url", expectedPath: "/v2/foo/bar/blobs/sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5", + expectedErr: nil, build: func() (string, error) { ref, _ := reference.WithDigest(fooBarRef, "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5") return urlBuilder.BuildBlobURL(ref) @@ -48,6 +63,7 @@ func makeURLBuilderTestCases(urlBuilder *URLBuilder) []urlBuilderTestCase { { description: "build blob upload url", expectedPath: "/v2/foo/bar/blobs/uploads/", + expectedErr: nil, build: func() (string, error) { return urlBuilder.BuildBlobUploadURL(fooBarRef) }, @@ -55,6 +71,7 @@ func makeURLBuilderTestCases(urlBuilder *URLBuilder) []urlBuilderTestCase { { description: "build blob upload url with digest and size", expectedPath: "/v2/foo/bar/blobs/uploads/?digest=sha256%3A3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5&size=10000", + expectedErr: nil, build: func() (string, error) { return urlBuilder.BuildBlobUploadURL(fooBarRef, url.Values{ "size": []string{"10000"}, @@ -65,6 +82,7 @@ func makeURLBuilderTestCases(urlBuilder *URLBuilder) []urlBuilderTestCase { { description: "build blob upload chunk url", expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part", + expectedErr: nil, build: func() (string, error) { return urlBuilder.BuildBlobUploadChunkURL(fooBarRef, "uuid-part") }, @@ -72,6 +90,7 @@ func 
makeURLBuilderTestCases(urlBuilder *URLBuilder) []urlBuilderTestCase { { description: "build blob upload chunk url with digest and size", expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part?digest=sha256%3A3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5&size=10000", + expectedErr: nil, build: func() (string, error) { return urlBuilder.BuildBlobUploadChunkURL(fooBarRef, "uuid-part", url.Values{ "size": []string{"10000"}, @@ -101,9 +120,14 @@ func TestURLBuilder(t *testing.T) { for _, testCase := range makeURLBuilderTestCases(urlBuilder) { url, err := testCase.build() - if err != nil { - t.Fatalf("%s: error building url: %v", testCase.description, err) + expectedErr := testCase.expectedErr + if !reflect.DeepEqual(expectedErr, err) { + t.Fatalf("%s: Expecting %v but got error %v", testCase.description, expectedErr, err) } + if expectedErr != nil { + continue + } + expectedURL := testCase.expectedPath if !relative { expectedURL = root + expectedURL @@ -136,8 +160,12 @@ func TestURLBuilderWithPrefix(t *testing.T) { for _, testCase := range makeURLBuilderTestCases(urlBuilder) { url, err := testCase.build() - if err != nil { - t.Fatalf("%s: error building url: %v", testCase.description, err) + expectedErr := testCase.expectedErr + if !reflect.DeepEqual(expectedErr, err) { + t.Fatalf("%s: Expecting %v but got error %v", testCase.description, expectedErr, err) + } + if expectedErr != nil { + continue } expectedURL := testCase.expectedPath @@ -392,8 +420,12 @@ func TestBuilderFromRequest(t *testing.T) { for _, testCase := range makeURLBuilderTestCases(builder) { buildURL, err := testCase.build() - if err != nil { - t.Fatalf("[relative=%t, request=%q, case=%q]: error building url: %v", relative, tr.name, testCase.description, err) + expectedErr := testCase.expectedErr + if !reflect.DeepEqual(expectedErr, err) { + t.Fatalf("%s: Expecting %v but got error %v", testCase.description, expectedErr, err) + } + if expectedErr != nil { + continue } var expectedURL string @@ -475,8 +507,12 @@ func TestBuilderFromRequestWithPrefix(t *testing.T) { for _, testCase := range makeURLBuilderTestCases(builder) { buildURL, err := testCase.build() - if err != nil { - t.Fatalf("%s: error building url: %v", testCase.description, err) + expectedErr := testCase.expectedErr + if !reflect.DeepEqual(expectedErr, err) { + t.Fatalf("%s: Expecting %v but got error %v", testCase.description, expectedErr, err) + } + if expectedErr != nil { + continue } var expectedURL string From 45bb7c9cc90743aca8b50eb32519d405c0ef78f9 Mon Sep 17 00:00:00 2001 From: Alvin Feng Date: Fri, 17 Mar 2017 23:35:50 +0000 Subject: [PATCH 016/276] Remove expires tag from s3 upload Signed-off-by: Alvin Feng --- registry/storage/driver/s3-aws/s3.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/registry/storage/driver/s3-aws/s3.go b/registry/storage/driver/s3-aws/s3.go index 872b8938..469e3997 100644 --- a/registry/storage/driver/s3-aws/s3.go +++ b/registry/storage/driver/s3-aws/s3.go @@ -701,15 +701,11 @@ func (d *driver) copy(ctx context.Context, sourcePath string, destPath string) e return nil } - // Even in the worst case, a multipart copy should take no more - // than a few minutes, so 30 minutes is very conservative. 
-	expires := time.Now().Add(time.Duration(30) * time.Minute)
 	createResp, err := d.S3.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
 		Bucket:               aws.String(d.Bucket),
 		Key:                  aws.String(d.s3Path(destPath)),
 		ContentType:          d.getContentType(),
 		ACL:                  d.getACL(),
-		Expires:              aws.Time(expires),
 		SSEKMSKeyId:          d.getSSEKMSKeyID(),
 		ServerSideEncryption: d.getEncryptionMode(),
 		StorageClass:         d.getStorageClass(),

From eaf60fffee2495c6e1b13ff53619063c2bc8f302 Mon Sep 17 00:00:00 2001
From: fate-grand-order
Date: Mon, 20 Mar 2017 20:15:55 +0800
Subject: [PATCH 017/276] fix some typos in notifications/event.go and sinks.go

Signed-off-by: fate-grand-order
---
 notifications/event.go | 4 ++--
 notifications/sinks.go | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/notifications/event.go b/notifications/event.go
index b59a72be..9651cd1b 100644
--- a/notifications/event.go
+++ b/notifications/event.go
@@ -77,7 +77,7 @@ type Event struct {
 	Request RequestRecord `json:"request,omitempty"`
 
 	// Actor specifies the agent that initiated the event. For most
-	// situations, this could be from the authorizaton context of the request.
+	// situations, this could be from the authorization context of the request.
 	Actor ActorRecord `json:"actor,omitempty"`
 
 	// Source identifies the registry node that generated the event. Put
@@ -87,7 +87,7 @@ type Event struct {
 }
 
 // ActorRecord specifies the agent that initiated the event. For most
-// situations, this could be from the authorizaton context of the request.
+// situations, this could be from the authorization context of the request.
 // Data in this record can refer to both the initiating client and the
 // generating request.
 type ActorRecord struct {

diff --git a/notifications/sinks.go b/notifications/sinks.go
index 549ba97e..beb8bad4 100644
--- a/notifications/sinks.go
+++ b/notifications/sinks.go
@@ -151,7 +151,7 @@ func (eq *eventQueue) Write(events ...Event) error {
 	return nil
 }
 
-// Close shutsdown the event queue, flushing
+// Close shuts down the event queue, flushing
 func (eq *eventQueue) Close() error {
 	eq.mu.Lock()
 	defer eq.mu.Unlock()

From f6c62456b8014388b32327b0c520749ac73b4879 Mon Sep 17 00:00:00 2001
From: Aaron Lehmann
Date: Mon, 20 Mar 2017 10:53:40 -0700
Subject: [PATCH 018/276] Fix vendor validation

The dep-validate target appears to never get invoked. In the CircleCI
build environment, "master" points to the commit under test. The
circle.yml fragment needs to compare against "origin/master" instead.

Signed-off-by: Aaron Lehmann
---
 circle.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/circle.yml b/circle.yml
index 76276763..b49d64ae 100644
--- a/circle.yml
+++ b/circle.yml
@@ -50,7 +50,7 @@ test:
     - gvm use stable && go version
 
     # Ensure validation of dependencies
-    - gvm use stable && if test -n "`git diff --stat=1000 master | grep -E \"^[[:space:]]*vendor\"`"; then make dep-validate; fi:
+    - gvm use stable && if test -n "`git diff --stat=1000 origin/master | grep -E \"^[[:space:]]*vendor\"`"; then make dep-validate; fi:
        pwd: $BASE_STABLE
 
    # First thing: build everything. This will catch compile errors, and it's

From 299b90b0bdfc67cad92c56659de72a8bbd11e878 Mon Sep 17 00:00:00 2001
From: Aaron Lehmann
Date: Mon, 20 Mar 2017 11:12:03 -0700
Subject: [PATCH 019/276] Makefile: Restore vendor directory after dep-validate

Currently, this target makes changes to the vendor directory, which may
overwrite local changes or cause build errors. It's better to restore
the original vendor directory after running the check.
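
In outline, the target re-vendors from `vendor.conf` with `$(VNDR)`,
fails if `diff -r vendor .vendor.bak` (the backup taken earlier in the
target) reports any difference, and, with this change, removes the
regenerated tree and moves `.vendor.bak` back into place instead of
deleting it, so the working copy ends up exactly as it started.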
Signed-off-by: Aaron Lehmann
---
 Makefile | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/Makefile b/Makefile
index 339bbc43..7c6f9c7a 100644
--- a/Makefile
+++ b/Makefile
@@ -95,4 +95,5 @@ dep-validate:
 	@$(VNDR)
 	@test -z "$$(diff -r vendor .vendor.bak 2>&1 | tee /dev/stderr)" || \
 		(echo >&2 "+ inconsistent dependencies! what you have in vendor.conf does not match with what you have in vendor" && false)
-	@rm -Rf .vendor.bak
+	@rm -Rf vendor
+	@mv .vendor.bak vendor

From d4c3e8842602e0192657fcf05f1804fbfc7f54fd Mon Sep 17 00:00:00 2001
From: Troels Thomsen
Date: Mon, 20 Mar 2017 16:10:36 -0700
Subject: [PATCH 020/276] Add test for precedence with standard port

Signed-off-by: Troels Thomsen
Signed-off-by: Derek McGowan (github: dmcgowan)
---
 registry/api/v2/urls_test.go | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/registry/api/v2/urls_test.go b/registry/api/v2/urls_test.go
index 6449c6e4..dae8354b 100644
--- a/registry/api/v2/urls_test.go
+++ b/registry/api/v2/urls_test.go
@@ -264,6 +264,22 @@ func TestBuilderFromRequest(t *testing.T) {
 			}},
 			base: "http://example.com:443",
 		},
+		{
+			name: "forwarded standard port with non-standard headers",
+			request: &http.Request{URL: u, Host: u.Host, Header: http.Header{
+				"X-Forwarded-Proto": []string{"https"},
+				"X-Forwarded-Port":  []string{"443"},
+			}},
+			base: "https://example.com",
+		},
+		{
+			name: "forwarded standard port with non-standard headers and explicit port",
+			request: &http.Request{URL: u, Host: u.Host + ":443", Header: http.Header{
+				"X-Forwarded-Proto": []string{"https"},
+				"X-Forwarded-Port":  []string{"443"},
+			}},
+			base: "https://example.com:443",
+		},
 		{
 			name: "several non-standard headers",
 			request: &http.Request{URL: u, Host: u.Host, Header: http.Header{

From 81a47d976695238dc4b281e201e81fae3785861a Mon Sep 17 00:00:00 2001
From: Derek McGowan
Date: Mon, 20 Mar 2017 16:13:33 -0700
Subject: [PATCH 021/276] Remove support for X-Forwarded-Port

Partially reverts change adding support for X-Forwarded-Port. Changes
the logic to prefer the standard Forwarded header over X-Forwarded
headers. Prefer forwarded "host" over "for" since "for" represents the
client and not the client's request.

Signed-off-by: Derek McGowan (github: dmcgowan)
---
 registry/api/v2/urls.go      | 113 +++++------------
 registry/api/v2/urls_test.go | 237 ++++++-----------------------------
 2 files changed, 72 insertions(+), 278 deletions(-)

diff --git a/registry/api/v2/urls.go b/registry/api/v2/urls.go
index 3eb3e09d..1337bdb1 100644
--- a/registry/api/v2/urls.go
+++ b/registry/api/v2/urls.go
@@ -2,10 +2,8 @@ package v2
 
 import (
 	"fmt"
-	"net"
 	"net/http"
 	"net/url"
-	"strconv"
 	"strings"
 
 	"github.com/docker/distribution/reference"
@@ -49,66 +47,42 @@ func NewURLBuilderFromString(root string, relative bool) (*URLBuilder, error) {
 
 // NewURLBuilderFromRequest uses information from an *http.Request to
 // construct the root url. 
func NewURLBuilderFromRequest(r *http.Request, relative bool) *URLBuilder {
-	var scheme string
-
-	forwardedProto := r.Header.Get("X-Forwarded-Proto")
-	// TODO: log the error
-	forwardedHeader, _, _ := parseForwardedHeader(r.Header.Get("Forwarded"))
-
-	switch {
-	case len(forwardedProto) > 0:
-		scheme = forwardedProto
-	case len(forwardedHeader["proto"]) > 0:
-		scheme = forwardedHeader["proto"]
-	case r.TLS != nil:
-		scheme = "https"
-	case len(r.URL.Scheme) > 0:
-		scheme = r.URL.Scheme
-	default:
+	var (
 		scheme = "http"
+		host   = r.Host
+	)
+
+	if r.TLS != nil {
+		scheme = "https"
+	} else if len(r.URL.Scheme) > 0 {
+		scheme = r.URL.Scheme
 	}
 
-	host := r.Host
-
-	if forwardedHost := r.Header.Get("X-Forwarded-Host"); len(forwardedHost) > 0 {
-		// According to the Apache mod_proxy docs, X-Forwarded-Host can be a
-		// comma-separated list of hosts, to which each proxy appends the
-		// requested host. We want to grab the first from this comma-separated
-		// list.
-		hosts := strings.SplitN(forwardedHost, ",", 2)
-		host = strings.TrimSpace(hosts[0])
-	} else if addr, exists := forwardedHeader["for"]; exists {
-		host = addr
-	} else if h, exists := forwardedHeader["host"]; exists {
-		host = h
-	}
-
-	portLessHost, port := host, ""
-	if !isIPv6Address(portLessHost) {
-		// with go 1.6, this would treat the last part of IPv6 address as a port
-		portLessHost, port, _ = net.SplitHostPort(host)
-	}
-	if forwardedPort := r.Header.Get("X-Forwarded-Port"); len(port) == 0 && len(forwardedPort) > 0 {
-		ports := strings.SplitN(forwardedPort, ",", 2)
-		forwardedPort = strings.TrimSpace(ports[0])
-		if _, err := strconv.ParseInt(forwardedPort, 10, 32); err == nil {
-			port = forwardedPort
+	// Handle forwarded headers
+	// Prefer "Forwarded" header as defined by rfc7239 if given
+	// see https://tools.ietf.org/html/rfc7239
+	if forwarded := r.Header.Get("Forwarded"); len(forwarded) > 0 {
+		forwardedHeader, _, err := parseForwardedHeader(forwarded)
+		if err == nil {
+			if fproto := forwardedHeader["proto"]; len(fproto) > 0 {
+				scheme = fproto
+			}
+			if fhost := forwardedHeader["host"]; len(fhost) > 0 {
+				host = fhost
+			}
 		}
-	}
-
-	if len(portLessHost) > 0 {
-		host = portLessHost
-	}
-	if len(port) > 0 {
-		// remove enclosing brackets of ipv6 address otherwise they will be duplicated
-		if len(host) > 1 && host[0] == '[' && host[len(host)-1] == ']' {
-			host = host[1 : len(host)-1]
+	} else {
+		if forwardedProto := r.Header.Get("X-Forwarded-Proto"); len(forwardedProto) > 0 {
+			scheme = forwardedProto
+		}
+		if forwardedHost := r.Header.Get("X-Forwarded-Host"); len(forwardedHost) > 0 {
+			// According to the Apache mod_proxy docs, X-Forwarded-Host can be a
+			// comma-separated list of hosts, to which each proxy appends the
+			// requested host. We want to grab the first from this comma-separated
+			// list.
+			hosts := strings.SplitN(forwardedHost, ",", 2)
+			host = strings.TrimSpace(hosts[0])
 		}
-		// JoinHostPort properly encloses ipv6 addresses in square brackets
-		host = net.JoinHostPort(host, port)
-	} else if isIPv6Address(host) && host[0] != '[' {
-		// ipv6 needs to be enclosed in square brackets in urls
-		host = "[" + host + "]"
 	}
 
 	basePath := routeDescriptorsMap[RouteNameBase].Path
@@ -290,28 +264,3 @@ func appendValues(u string, values ...url.Values) string {
 
 	return appendValuesURL(up, values...).String()
 }
-
-// isIPv6Address returns true if given string is a valid IPv6 address. No port is allowed. The address may be
-// enclosed in square brackets. 
-func isIPv6Address(host string) bool { - if len(host) > 1 && host[0] == '[' && host[len(host)-1] == ']' { - host = host[1 : len(host)-1] - } - // The IPv6 scoped addressing zone identifier starts after the last percent sign. - if i := strings.LastIndexByte(host, '%'); i > 0 { - host = host[:i] - } - ip := net.ParseIP(host) - if ip == nil { - return false - } - if ip.To16() == nil { - return false - } - if ip.To4() == nil { - return true - } - // dot can be present in ipv4-mapped address, it needs to come after a colon though - i := strings.IndexAny(host, ":.") - return i >= 0 && host[i] == ':' -} diff --git a/registry/api/v2/urls_test.go b/registry/api/v2/urls_test.go index dae8354b..4f854b23 100644 --- a/registry/api/v2/urls_test.go +++ b/registry/api/v2/urls_test.go @@ -207,7 +207,7 @@ func TestBuilderFromRequest(t *testing.T) { { name: "https protocol forwarded with a non-standard header", request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ - "X-Forwarded-Proto": []string{"https"}, + "X-Custom-Forwarded-Proto": []string{"https"}, }}, base: "http://example.com", }, @@ -253,6 +253,7 @@ func TestBuilderFromRequest(t *testing.T) { { name: "forwarded port with a non-standard header", request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "X-Forwarded-Host": []string{"example.com:5000"}, "X-Forwarded-Port": []string{"5000"}, }}, base: "http://example.com:5000", @@ -262,12 +263,13 @@ func TestBuilderFromRequest(t *testing.T) { request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ "X-Forwarded-Port": []string{"443 , 5001"}, }}, - base: "http://example.com:443", + base: "http://example.com", }, { name: "forwarded standard port with non-standard headers", request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ "X-Forwarded-Proto": []string{"https"}, + "X-Forwarded-Host": []string{"example.com"}, "X-Forwarded-Port": []string{"443"}, }}, base: "https://example.com", @@ -276,6 +278,7 @@ func TestBuilderFromRequest(t *testing.T) { name: "forwarded standard port with non-standard headers and explicit port", request: &http.Request{URL: u, Host: u.Host + ":443", Header: http.Header{ "X-Forwarded-Proto": []string{"https"}, + "X-Forwarded-Host": []string{u.Host + ":443"}, "X-Forwarded-Port": []string{"443"}, }}, base: "https://example.com:443", @@ -284,10 +287,9 @@ func TestBuilderFromRequest(t *testing.T) { name: "several non-standard headers", request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ "X-Forwarded-Proto": []string{"https"}, - "X-Forwarded-Host": []string{" first.example.com "}, - "X-Forwarded-Port": []string{" 12345 \t"}, + "X-Forwarded-Host": []string{" first.example.com:12345 "}, }}, - base: "http://first.example.com:12345", + base: "https://first.example.com:12345", }, { name: "forwarded host with port supplied takes priority", @@ -308,16 +310,16 @@ func TestBuilderFromRequest(t *testing.T) { { name: "forwarded protocol and addr using standard header", request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ - "Forwarded": []string{`proto=https;for="192.168.22.30:80"`}, + "Forwarded": []string{`proto=https;host="192.168.22.30:80"`}, }}, base: "https://192.168.22.30:80", }, { - name: "forwarded addr takes priority over host", + name: "forwarded host takes priority over for", request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ - "Forwarded": []string{`host=reg.example.com;for="192.168.22.30:5000"`}, + "Forwarded": []string{`host="reg.example.com:5000";for="192.168.22.30"`}, }}, - base: 
"http://192.168.22.30:5000", + base: "http://reg.example.com:5000", }, { name: "forwarded host and protocol using standard header", @@ -336,73 +338,26 @@ func TestBuilderFromRequest(t *testing.T) { { name: "process just the first list element of standard header", request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ - "Forwarded": []string{`for="reg.example.com:443";proto=https, for="reg.example.com:80";proto=http`}, + "Forwarded": []string{`host="reg.example.com:443";proto=https, host="reg.example.com:80";proto=http`}, }}, base: "https://reg.example.com:443", }, { - name: "IPv6 address override port", + name: "IPv6 address use host", request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ - "Forwarded": []string{`for="2607:f0d0:1002:51::4"`}, - "X-Forwarded-Port": []string{"5001"}, + "Forwarded": []string{`for="2607:f0d0:1002:51::4";host="[2607:f0d0:1002:51::4]:5001"`}, + "X-Forwarded-Port": []string{"5002"}, }}, base: "http://[2607:f0d0:1002:51::4]:5001", }, { name: "IPv6 address with port", request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ - "Forwarded": []string{`for="[2607:f0d0:1002:51::4]:4000"`}, + "Forwarded": []string{`host="[2607:f0d0:1002:51::4]:4000"`}, "X-Forwarded-Port": []string{"5001"}, }}, base: "http://[2607:f0d0:1002:51::4]:4000", }, - { - name: "IPv6 long address override port", - request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ - "Forwarded": []string{`for="2607:f0d0:1002:0051:0000:0000:0000:0004"`}, - "X-Forwarded-Port": []string{"5001"}, - }}, - base: "http://[2607:f0d0:1002:0051:0000:0000:0000:0004]:5001", - }, - { - name: "IPv6 long address enclosed in brackets - be benevolent", - request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ - "Forwarded": []string{`for="[2607:f0d0:1002:0051:0000:0000:0000:0004]"`}, - "X-Forwarded-Port": []string{"5001"}, - }}, - base: "http://[2607:f0d0:1002:0051:0000:0000:0000:0004]:5001", - }, - { - name: "IPv6 long address with port", - request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ - "Forwarded": []string{`for="[2607:f0d0:1002:0051:0000:0000:0000:0004]:4321"`}, - "X-Forwarded-Port": []string{"5001"}, - }}, - base: "http://[2607:f0d0:1002:0051:0000:0000:0000:0004]:4321", - }, - { - name: "IPv6 address with zone ID", - request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ - "Forwarded": []string{`for="fe80::bd0f:a8bc:6480:238b%11"`}, - "X-Forwarded-Port": []string{"5001"}, - }}, - base: "http://[fe80::bd0f:a8bc:6480:238b%2511]:5001", - }, - { - name: "IPv6 address with zone ID and port", - request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ - "Forwarded": []string{`for="[fe80::bd0f:a8bc:6480:238b%eth0]:12345"`}, - "X-Forwarded-Port": []string{"5001"}, - }}, - base: "http://[fe80::bd0f:a8bc:6480:238b%25eth0]:12345", - }, - { - name: "IPv6 address without port", - request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ - "Forwarded": []string{`for="::FFFF:129.144.52.38"`}, - }}, - base: "http://[::FFFF:129.144.52.38]", - }, { name: "non-standard and standard forward headers", request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ @@ -414,14 +369,34 @@ func TestBuilderFromRequest(t *testing.T) { base: "https://first.example.com", }, { - name: "non-standard headers take precedence over standard one", + name: "standard header takes precedence over non-standard headers", request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ "X-Forwarded-Proto": []string{`http`}, "Forwarded": 
[]string{`host=second.example.com; proto=https`}, "X-Forwarded-Host": []string{`first.example.com`}, "X-Forwarded-Port": []string{`4000`}, }}, - base: "http://first.example.com:4000", + base: "https://second.example.com", + }, + { + name: "incomplete standard header uses default", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "X-Forwarded-Proto": []string{`https`}, + "Forwarded": []string{`for=127.0.0.1`}, + "X-Forwarded-Host": []string{`first.example.com`}, + "X-Forwarded-Port": []string{`4000`}, + }}, + base: "http://" + u.Host, + }, + { + name: "standard with just proto", + request: &http.Request{URL: u, Host: u.Host, Header: http.Header{ + "X-Forwarded-Proto": []string{`https`}, + "Forwarded": []string{`proto=https`}, + "X-Forwarded-Host": []string{`first.example.com`}, + "X-Forwarded-Port": []string{`4000`}, + }}, + base: "https://" + u.Host, }, } @@ -444,23 +419,9 @@ func TestBuilderFromRequest(t *testing.T) { continue } - var expectedURL string - proto, ok := tr.request.Header["X-Forwarded-Proto"] - if !ok { - expectedURL = testCase.expectedPath - if !relative { - expectedURL = tr.base + expectedURL - } - } else { - urlBase, err := url.Parse(tr.base) - if err != nil { - t.Fatal(err) - } - urlBase.Scheme = proto[0] - expectedURL = testCase.expectedPath - if !relative { - expectedURL = urlBase.String() + expectedURL - } + expectedURL := testCase.expectedPath + if !relative { + expectedURL = tr.base + expectedURL } if buildURL != expectedURL { @@ -557,119 +518,3 @@ func TestBuilderFromRequestWithPrefix(t *testing.T) { } } } - -func TestIsIPv6Address(t *testing.T) { - for _, tc := range []struct { - name string - address string - isIPv6 bool - }{ - { - name: "IPv6 short address", - address: `2607:f0d0:1002:51::4`, - isIPv6: true, - }, - { - name: "IPv6 short address enclosed in brackets", - address: "[2607:f0d0:1002:51::4]", - isIPv6: true, - }, - { - name: "IPv6 address", - address: `2607:f0d0:1002:0051:0000:0000:0000:0004`, - isIPv6: true, - }, - { - name: "IPv6 address with numeric zone ID", - address: `fe80::bd0f:a8bc:6480:238b%11`, - isIPv6: true, - }, - { - name: "IPv6 address with device name as zone ID", - address: `fe80::bd0f:a8bc:6480:238b%eth0`, - isIPv6: true, - }, - { - name: "IPv6 address with device name as zone ID enclosed in brackets", - address: `[fe80::bd0f:a8bc:6480:238b%eth0]`, - isIPv6: true, - }, - { - name: "IPv4-mapped address", - address: "::FFFF:129.144.52.38", - isIPv6: true, - }, - { - name: "localhost", - address: "::1", - isIPv6: true, - }, - { - name: "localhost", - address: "::1", - isIPv6: true, - }, - { - name: "long localhost address", - address: "0:0:0:0:0:0:0:1", - isIPv6: true, - }, - { - name: "IPv6 long address with port", - address: "[2607:f0d0:1002:0051:0000:0000:0000:0004]:4321", - isIPv6: false, - }, - { - name: "too many groups", - address: "2607:f0d0:1002:0051:0000:0000:0000:0004:4321", - isIPv6: false, - }, - { - name: "square brackets don't make an IPv6 address", - address: "[2607:f0d0]", - isIPv6: false, - }, - { - name: "require two consecutive colons in localhost", - address: ":1", - isIPv6: false, - }, - { - name: "more then 4 hexadecimal digits", - address: "2607:f0d0b:1002:0051:0000:0000:0000:0004", - isIPv6: false, - }, - { - name: "too short address", - address: `2607:f0d0:1002:0000:0000:0000:0004`, - isIPv6: false, - }, - { - name: "IPv4 address", - address: `192.168.100.1`, - isIPv6: false, - }, - { - name: "unclosed bracket", - address: `[2607:f0d0:1002:0051:0000:0000:0000:0004`, - isIPv6: false, - }, - 
{ - name: "trailing bracket", - address: `2607:f0d0:1002:0051:0000:0000:0000:0004]`, - isIPv6: false, - }, - { - name: "domain name", - address: `localhost`, - isIPv6: false, - }, - } { - isIPv6 := isIPv6Address(tc.address) - if isIPv6 && !tc.isIPv6 { - t.Errorf("[%s] address %q falsely detected as IPv6 address", tc.name, tc.address) - } else if !isIPv6 && tc.isIPv6 { - t.Errorf("[%s] address %q not recognized as IPv6", tc.name, tc.address) - } - } -} From 364d2e4a5bed94e9393dd88232ea3be29ca8f27a Mon Sep 17 00:00:00 2001 From: Santiago Torres Date: Wed, 5 Apr 2017 19:18:48 -0400 Subject: [PATCH 022/276] DOC: README: Fix broken link in migrator A stray space in the link for the migrator repository wasn't allowing it to be rendered properly in markdown viewers. Remove such a space to avoid this. Signed-off-by: Santiago Torres-Arias --- README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README.md b/README.md index a6e8db0f..99887885 100644 --- a/README.md +++ b/README.md @@ -76,8 +76,7 @@ may be the better choice. For those who have previously deployed their own registry based on the Registry 1.0 implementation and wish to deploy a Registry 2.0 while retaining images, data migration is required. A tool to assist with migration efforts has been -created. For more information see [docker/migrator] -(https://github.com/docker/migrator). +created. For more information see [docker/migrator](https://github.com/docker/migrator). ## Contribute From b22c6b7a4ec53b40beb61fdf886c84d54ccfdbcd Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Mon, 10 Apr 2017 12:07:32 -0700 Subject: [PATCH 023/276] Update vendor directory to match expectation of vndr tool Adds READMEs and enforces vendor is done at repository root Signed-off-by: Derek McGowan --- vendor.conf | 6 +- .../Azure/azure-sdk-for-go/README.md | 116 +++ .../Azure/azure-sdk-for-go/storage/README.md | 5 + vendor/github.com/Sirupsen/logrus/README.md | 425 +++++++++++ vendor/github.com/aws/aws-sdk-go/LICENSE.txt | 202 +++++ vendor/github.com/aws/aws-sdk-go/NOTICE.txt | 3 + vendor/github.com/aws/aws-sdk-go/README.md | 121 +++ .../aws/aws-sdk-go/private/README.md | 4 + .../logrus-logstash-hook/README.md | 95 +++ .../github.com/bugsnag/bugsnag-go/LICENSE.txt | 20 + .../github.com/bugsnag/bugsnag-go/README.md | 489 ++++++++++++ .../bugsnag/bugsnag-go/errors/README.md | 6 + vendor/github.com/bugsnag/panicwrap/README.md | 101 +++ .../denverdino/aliyungo/LICENSE.txt | 191 +++++ .../github.com/denverdino/aliyungo/README.md | 143 ++++ vendor/github.com/docker/goamz/README.md | 68 ++ vendor/github.com/docker/libtrust/README.md | 18 + .../garyburd/redigo/README.markdown | 44 ++ vendor/github.com/go-ini/ini/README.md | 709 ++++++++++++++++++ vendor/github.com/go-ini/ini/README_ZH.md | 696 +++++++++++++++++ vendor/github.com/golang/protobuf/README.md | 199 +++++ vendor/github.com/gorilla/context/README.md | 7 + vendor/github.com/gorilla/handlers/README.md | 52 ++ vendor/github.com/gorilla/mux/README.md | 7 + .../inconshreveable/mousetrap/README.md | 23 + .../github.com/jmespath/go-jmespath/README.md | 7 + vendor/github.com/miekg/dns/README.md | 154 ++++ .../mitchellh/mapstructure/README.md | 46 ++ vendor/github.com/ncw/swift/README.md | 140 ++++ .../opencontainers/go-digest/LICENSE.code | 191 +++++ .../opencontainers/go-digest/LICENSE.docs | 425 +++++++++++ .../opencontainers/go-digest/README.md | 104 +++ vendor/github.com/spf13/cobra/LICENSE.txt | 174 +++++ vendor/github.com/spf13/cobra/README.md | 485 ++++++++++++ 
vendor/github.com/spf13/pflag/README.md | 191 +++++ .../github.com/stevvooe/resumable/README.md | 6 + vendor/github.com/xenolf/lego/README.md | 248 ++++++ .../yvasiyarov/go-metrics/README.md | 104 +++ .../github.com/yvasiyarov/gorelic/README.md | 119 +++ .../yvasiyarov/newrelic_platform_go/README.md | 11 + vendor/golang.org/x/crypto/README | 3 + vendor/golang.org/x/net/README | 3 + vendor/golang.org/x/net/http2/README | 20 + vendor/golang.org/x/oauth2/README.md | 64 ++ vendor/golang.org/x/time/README | 1 + vendor/google.golang.org/api/README.md | 92 +++ vendor/google.golang.org/appengine/README.md | 73 ++ vendor/google.golang.org/cloud/README.md | 135 ++++ vendor/google.golang.org/grpc/README.md | 32 + vendor/gopkg.in/check.v1/README.md | 20 + vendor/gopkg.in/square/go-jose.v1/README.md | 209 ++++++ .../gopkg.in/square/go-jose.v1/json/README.md | 13 + vendor/gopkg.in/yaml.v2/LICENSE.libyaml | 31 + vendor/gopkg.in/yaml.v2/README.md | 128 ++++ vendor/rsc.io/letsencrypt/README | 177 +++++ 55 files changed, 7153 insertions(+), 3 deletions(-) create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/README.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/README.md create mode 100644 vendor/github.com/Sirupsen/logrus/README.md create mode 100644 vendor/github.com/aws/aws-sdk-go/LICENSE.txt create mode 100644 vendor/github.com/aws/aws-sdk-go/NOTICE.txt create mode 100644 vendor/github.com/aws/aws-sdk-go/README.md create mode 100644 vendor/github.com/aws/aws-sdk-go/private/README.md create mode 100644 vendor/github.com/bshuster-repo/logrus-logstash-hook/README.md create mode 100644 vendor/github.com/bugsnag/bugsnag-go/LICENSE.txt create mode 100644 vendor/github.com/bugsnag/bugsnag-go/README.md create mode 100644 vendor/github.com/bugsnag/bugsnag-go/errors/README.md create mode 100644 vendor/github.com/bugsnag/panicwrap/README.md create mode 100644 vendor/github.com/denverdino/aliyungo/LICENSE.txt create mode 100644 vendor/github.com/denverdino/aliyungo/README.md create mode 100644 vendor/github.com/docker/goamz/README.md create mode 100644 vendor/github.com/docker/libtrust/README.md create mode 100644 vendor/github.com/garyburd/redigo/README.markdown create mode 100644 vendor/github.com/go-ini/ini/README.md create mode 100644 vendor/github.com/go-ini/ini/README_ZH.md create mode 100644 vendor/github.com/golang/protobuf/README.md create mode 100644 vendor/github.com/gorilla/context/README.md create mode 100644 vendor/github.com/gorilla/handlers/README.md create mode 100644 vendor/github.com/gorilla/mux/README.md create mode 100644 vendor/github.com/inconshreveable/mousetrap/README.md create mode 100644 vendor/github.com/jmespath/go-jmespath/README.md create mode 100644 vendor/github.com/miekg/dns/README.md create mode 100644 vendor/github.com/mitchellh/mapstructure/README.md create mode 100644 vendor/github.com/ncw/swift/README.md create mode 100644 vendor/github.com/opencontainers/go-digest/LICENSE.code create mode 100644 vendor/github.com/opencontainers/go-digest/LICENSE.docs create mode 100644 vendor/github.com/opencontainers/go-digest/README.md create mode 100644 vendor/github.com/spf13/cobra/LICENSE.txt create mode 100644 vendor/github.com/spf13/cobra/README.md create mode 100644 vendor/github.com/spf13/pflag/README.md create mode 100644 vendor/github.com/stevvooe/resumable/README.md create mode 100644 vendor/github.com/xenolf/lego/README.md create mode 100644 vendor/github.com/yvasiyarov/go-metrics/README.md create mode 100644 
vendor/github.com/yvasiyarov/gorelic/README.md create mode 100644 vendor/github.com/yvasiyarov/newrelic_platform_go/README.md create mode 100644 vendor/golang.org/x/crypto/README create mode 100644 vendor/golang.org/x/net/README create mode 100644 vendor/golang.org/x/net/http2/README create mode 100644 vendor/golang.org/x/oauth2/README.md create mode 100644 vendor/golang.org/x/time/README create mode 100644 vendor/google.golang.org/api/README.md create mode 100644 vendor/google.golang.org/appengine/README.md create mode 100644 vendor/google.golang.org/cloud/README.md create mode 100644 vendor/google.golang.org/grpc/README.md create mode 100644 vendor/gopkg.in/check.v1/README.md create mode 100644 vendor/gopkg.in/square/go-jose.v1/README.md create mode 100644 vendor/gopkg.in/square/go-jose.v1/json/README.md create mode 100644 vendor/gopkg.in/yaml.v2/LICENSE.libyaml create mode 100644 vendor/gopkg.in/yaml.v2/README.md create mode 100644 vendor/rsc.io/letsencrypt/README diff --git a/vendor.conf b/vendor.conf index 443b4bcf..85e5c94d 100644 --- a/vendor.conf +++ b/vendor.conf @@ -10,7 +10,7 @@ github.com/docker/goamz f0a21f5b2e12f83a505ecf79b633bb2035cf6f85 github.com/docker/libtrust fa567046d9b14f6aa788882a950d69651d230b21 github.com/garyburd/redigo 535138d7bcd717d6531c701ef5933d98b1866257 github.com/go-ini/ini 2ba15ac2dc9cdf88c110ec2dc0ced7fa45f5678c -github.com/golang/protobuf/proto 8d92cf5fc15a4382f8964b08e1f42a75c0591aa3 +github.com/golang/protobuf 8d92cf5fc15a4382f8964b08e1f42a75c0591aa3 github.com/gorilla/context 14f550f51af52180c2eefed15e5fd18d63c0a64a github.com/gorilla/handlers 60c7bfde3e33c201519a200a4507a158cc03a17b github.com/gorilla/mux e444e69cbd2e2e3e0749a2f3c717cec491552bbf @@ -22,14 +22,14 @@ github.com/ncw/swift b964f2ca856aac39885e258ad25aec08d5f64ee6 github.com/spf13/cobra 312092086bed4968099259622145a0c9ae280064 github.com/spf13/pflag 5644820622454e71517561946e3d94b9f9db6842 github.com/stevvooe/resumable 51ad44105773cafcbe91927f70ac68e1bf78f8b4 -github.com/xenolf/lego/acme a9d8cec0e6563575e5868a005359ac97911b5985 +github.com/xenolf/lego a9d8cec0e6563575e5868a005359ac97911b5985 github.com/yvasiyarov/go-metrics 57bccd1ccd43f94bb17fdd8bf3007059b802f85e github.com/yvasiyarov/gorelic a9bba5b9ab508a086f9a12b8c51fab68478e2128 github.com/yvasiyarov/newrelic_platform_go b21fdbd4370f3717f3bbd2bf41c223bc273068e6 golang.org/x/crypto c10c31b5e94b6f7a0283272dc2bb27163dcea24b golang.org/x/net 4876518f9e71663000c348837735820161a42df7 golang.org/x/oauth2 045497edb6234273d67dbc25da3f2ddbc4c4cacf -golang.org/x/time/rate a4bde12657593d5e90d0533a3e4fd95e635124cb +golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb google.golang.org/api 9bf6e6e569ff057f75d9604a46c52928f17d2b54 google.golang.org/appengine 12d5545dc1cfa6047a286d5e853841b6471f4c19 google.golang.org/cloud 975617b05ea8a58727e6c1a06b6161ff4185a9f2 diff --git a/vendor/github.com/Azure/azure-sdk-for-go/README.md b/vendor/github.com/Azure/azure-sdk-for-go/README.md new file mode 100644 index 00000000..13d3c4b8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/README.md @@ -0,0 +1,116 @@ +# Microsoft Azure SDK for Go + +This project provides various Go packages to perform operations +on Microsoft Azure REST APIs. 
+ +[![GoDoc](https://godoc.org/github.com/Azure/azure-sdk-for-go?status.svg)](https://godoc.org/github.com/Azure/azure-sdk-for-go) [![Build Status](https://travis-ci.org/Azure/azure-sdk-for-go.svg?branch=master)](https://travis-ci.org/Azure/azure-sdk-for-go) + +> **NOTE:** This repository is under heavy ongoing development and +is likely to break over time. We currently do not have any releases +yet. If you are planning to use the repository, please consider vendoring +the packages in your project and update them when a stable tag is out. + +# Packages + +## Azure Resource Manager (ARM) + +[About ARM](/arm/README.md) + +- [analysisservices](/arm/analysisservices) +- [authorization](/arm/authorization) +- [batch](/arm/batch) +- [cdn](/arm/cdn) +- [cognitiveservices](/arm/cognitiveservices) +- [commerce](/arm/commerce) +- [compute](/arm/compute) +- [containerregistry](/arm/containerregistry) +- [containerservice](/arm/containerservice) +- [datalake-analytics/account](/arm/datalake-analytics/account) +- [datalake-store/account](/arm/datalake-store/account) +- [devtestlabs](/arm/devtestlabs) +- [dns](/arm/dns) +- [documentdb](/arm/documentdb) +- [eventhub](/arm/eventhub) +- [intune](/arm/intune) +- [iothub](/arm/iothub) +- [keyvault](/arm/keyvault) +- [logic](/arm/logic) +- [machinelearning/commitmentplans](/arm/machinelearning/commitmentplans) +- [machinelearning/webservices](/arm/machinelearning/webservices) +- [mediaservices](/arm/mediaservices) +- [mobileengagement](/arm/mobileengagement) +- [network](/arm/network) +- [notificationhubs](/arm/notificationhubs) +- [powerbiembedded](/arm/powerbiembedded) +- [recoveryservices](/arm/recoveryservices) +- [redis](/arm/redis) +- [resources/features](/arm/resources/features) +- [resources/links](/arm/resources/links) +- [resources/locks](/arm/resources/locks) +- [resources/policy](/arm/resources/policy) +- [resources/resources](/arm/resources/resources) +- [resources/subscriptions](/arm/resources/subscriptions) +- [scheduler](/arm/scheduler) +- [search](/arm/search) +- [servermanagement](/arm/servermanagement) +- [servicebus](/arm/servicebus) +- [sql](/arm/sql) +- [storage](/arm/storage) +- [trafficmanager](/arm/trafficmanager) +- [web](/arm/web) + +## Azure Service Management (ASM), aka classic deployment + +[About ASM](/management/README.md) + +- [affinitygroup](/management/affinitygroup) +- [hostedservice](/management/hostedservice) +- [location](/management/location) +- [networksecuritygroup](/management/networksecuritygroup) +- [osimage](/management/osimage) +- [sql](/management/sql) +- [storageservice](/management/storageservice) +- [virtualmachine](/management/virtualmachine) +- [virtualmachinedisk](/management/virtualmachinedisk) +- [virtualmachineimage](/management/virtualmachineimage) +- [virtualnetwork](/management/virtualnetwork) +- [vmutils](/management/vmutils) + +## Azure Storage SDK for Go + +[About Storage](/storage/README.md) + +- [storage](/storage) + +# Installation + +- [Install Go 1.7](https://golang.org/dl/). + +- Go get the SDK: + +``` +$ go get -d github.com/Azure/azure-sdk-for-go +``` + +> **IMPORTANT:** We highly suggest vendoring Azure SDK for Go as a dependency. For vendoring dependencies, Azure SDK for Go uses [glide](https://github.com/Masterminds/glide). If you haven't already, install glide. Navigate to your project directory and install the dependencies. 
+ +``` +$ cd your/project +$ glide create +$ glide install +``` + +# Documentation + +Read the Godoc of the repository at [Godoc.org](http://godoc.org/github.com/Azure/azure-sdk-for-go/). + +# Contribute + +If you would like to become an active contributor to this project please follow the instructions provided in [Microsoft Azure Projects Contribution Guidelines](http://azure.github.io/guidelines/). + +# License + +This project is published under [Apache 2.0 License](LICENSE). + +----- +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md b/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md new file mode 100644 index 00000000..0ab09984 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md @@ -0,0 +1,5 @@ +# Azure Storage SDK for Go + +The `github.com/Azure/azure-sdk-for-go/storage` package is used to perform operations in Azure Storage Service. To manage your storage accounts (Azure Resource Manager / ARM), use the [github.com/Azure/azure-sdk-for-go/arm/storage](../arm/storage) package. For your classic storage accounts (Azure Service Management / ASM), use [github.com/Azure/azure-sdk-for-go/management/storageservice](../management/storageservice) package. + +This package includes support for [Azure Storage Emulator](https://azure.microsoft.com/documentation/articles/storage-use-emulator/) \ No newline at end of file diff --git a/vendor/github.com/Sirupsen/logrus/README.md b/vendor/github.com/Sirupsen/logrus/README.md new file mode 100644 index 00000000..126cd1fc --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/README.md @@ -0,0 +1,425 @@ +# Logrus :walrus: [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/Sirupsen/logrus?status.svg)](https://godoc.org/github.com/Sirupsen/logrus) + +Logrus is a structured logger for Go (golang), completely API compatible with +the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not +yet stable (pre 1.0). Logrus itself is completely stable and has been used in +many large deployments. 
The core API is unlikely to change much but please +version control your Logrus to make sure you aren't fetching latest `master` on +every build.** + +Nicely color-coded in development (when a TTY is attached, otherwise just +plain text): + +![Colored](http://i.imgur.com/PY7qMwd.png) + +With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash +or Splunk: + +```json +{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the +ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} + +{"level":"warning","msg":"The group's number increased tremendously!", +"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"A giant walrus appears!", +"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", +"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} + +{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, +"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} +``` + +With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not +attached, the output is compatible with the +[logfmt](http://godoc.org/github.com/kr/logfmt) format: + +```text +time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 +time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 +time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true +time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 +time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 +time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true +exit status 1 +``` + +#### Example + +The simplest way to use Logrus is simply the package-level exported logger: + +```go +package main + +import ( + log "github.com/Sirupsen/logrus" +) + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + }).Info("A walrus appears") +} +``` + +Note that it's completely api-compatible with the stdlib logger, so you can +replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"` +and you'll now have the flexibility of Logrus. You can customize it all you +want: + +```go +package main + +import ( + "os" + log "github.com/Sirupsen/logrus" +) + +func init() { + // Log as JSON instead of the default ASCII formatter. + log.SetFormatter(&log.JSONFormatter{}) + + // Output to stderr instead of stdout, could also be a file. + log.SetOutput(os.Stderr) + + // Only log the warning severity or above. 
+ log.SetLevel(log.WarnLevel) +} + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(log.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(log.Fields{ + "omg": true, + "number": 100, + }).Fatal("The ice breaks!") + + // A common pattern is to re-use fields between logging statements by re-using + // the logrus.Entry returned from WithFields() + contextLogger := log.WithFields(log.Fields{ + "common": "this is a common field", + "other": "I also should be logged always", + }) + + contextLogger.Info("I'll be logged with common and other field") + contextLogger.Info("Me too") +} +``` + +For more advanced usage such as logging to multiple locations from the same +application, you can also create an instance of the `logrus` Logger: + +```go +package main + +import ( + "github.com/Sirupsen/logrus" +) + +// Create a new instance of the logger. You can have any number of instances. +var log = logrus.New() + +func main() { + // The API for setting attributes is a little different than the package level + // exported logger. See Godoc. + log.Out = os.Stderr + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") +} +``` + +#### Fields + +Logrus encourages careful, structured logging though logging fields instead of +long, unparseable error messages. For example, instead of: `log.Fatalf("Failed +to send event %s to topic %s with key %d")`, you should log the much more +discoverable: + +```go +log.WithFields(log.Fields{ + "event": event, + "topic": topic, + "key": key, +}).Fatal("Failed to send event") +``` + +We've found this API forces you to think about logging in a way that produces +much more useful logging messages. We've been in countless situations where just +a single added field to a log statement that was already there would've saved us +hours. The `WithFields` call is optional. + +In general, with Logrus using any of the `printf`-family functions should be +seen as a hint you should add a field, however, you can still use the +`printf`-family functions with Logrus. + +#### Hooks + +You can add hooks for logging levels. For example to send errors to an exception +tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to +multiple places simultaneously, e.g. syslog. + +Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in +`init`: + +```go +import ( + log "github.com/Sirupsen/logrus" + "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake" + logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog" + "log/syslog" +) + +func init() { + + // Use the Airbrake hook to report errors that have Error severity or above to + // an exception tracker. You can create custom hooks, see the Hooks section. + log.AddHook(airbrake.NewHook(123, "xyz", "production")) + + hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") + if err != nil { + log.Error("Unable to connect to local syslog daemon") + } else { + log.AddHook(hook) + } +} +``` +Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md). + +| Hook | Description | +| ----- | ----------- | +| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. 
Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. | +| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. | +| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. | +| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. | +| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. | +| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. | +| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. | +| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | +| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. | +| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | +| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) | +| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) | +| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem | +| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger | +| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail | +| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar | +| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd | +| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb | +| [Influxus] (http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB] (http://influxdata.com/) | +| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb | +| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit | +| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic | +| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) | +| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) | +| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka | +| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) | +| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch| +| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)| +| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)| +| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) | +| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to 
[logz.io](https://logz.io), a Log as a Service using Logstash | +| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) | +| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) | + + +#### Level logging + +Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic. + +```go +log.Debug("Useful debugging information.") +log.Info("Something noteworthy happened!") +log.Warn("You should probably take a look at this.") +log.Error("Something failed but I'm not quitting.") +// Calls os.Exit(1) after logging +log.Fatal("Bye.") +// Calls panic() after logging +log.Panic("I'm bailing.") +``` + +You can set the logging level on a `Logger`, then it will only log entries with +that severity or anything above it: + +```go +// Will log anything that is info or above (warn, error, fatal, panic). Default. +log.SetLevel(log.InfoLevel) +``` + +It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose +environment if your application has that. + +#### Entries + +Besides the fields added with `WithField` or `WithFields` some fields are +automatically added to all logging events: + +1. `time`. The timestamp when the entry was created. +2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after + the `AddFields` call. E.g. `Failed to send event.` +3. `level`. The logging level. E.g. `info`. + +#### Environments + +Logrus has no notion of environment. + +If you wish for hooks and formatters to only be used in specific environments, +you should handle that yourself. For example, if your application has a global +variable `Environment`, which is a string representation of the environment you +could do: + +```go +import ( + log "github.com/Sirupsen/logrus" +) + +init() { + // do something here to set environment depending on an environment variable + // or command-line flag + if Environment == "production" { + log.SetFormatter(&log.JSONFormatter{}) + } else { + // The TextFormatter is default, you don't actually have to do this. + log.SetFormatter(&log.TextFormatter{}) + } +} +``` + +This configuration is how `logrus` was intended to be used, but JSON in +production is mostly only useful if you do log aggregation with tools like +Splunk or Logstash. + +#### Formatters + +The built-in logging formatters are: + +* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise + without colors. + * *Note:* to force colored output when there is no TTY, set the `ForceColors` + field to `true`. To force no colored output even if there is a TTY set the + `DisableColors` field to `true` +* `logrus.JSONFormatter`. Logs fields as JSON. + +Third party logging formatters: + +* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events. +* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. +* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. + +You can define your formatter by implementing the `Formatter` interface, +requiring a `Format` method. `Format` takes an `*Entry`. 
`entry.Data` is a +`Fields` type (`map[string]interface{}`) with all your fields as well as the +default ones (see Entries section above): + +```go +type MyJSONFormatter struct { +} + +log.SetFormatter(new(MyJSONFormatter)) + +func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) { + // Note this doesn't include Time, Level and Message which are available on + // the Entry. Consult `godoc` on information about those fields or read the + // source of the official loggers. + serialized, err := json.Marshal(entry.Data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + } + return append(serialized, '\n'), nil +} +``` + +#### Logger as an `io.Writer` + +Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. + +```go +w := logger.Writer() +defer w.Close() + +srv := http.Server{ + // create a stdlib log.Logger that writes to + // logrus.Logger. + ErrorLog: log.New(w, "", 0), +} +``` + +Each line written to that writer will be printed the usual way, using formatters +and hooks. The level for those entries is `info`. + +#### Rotation + +Log rotation is not provided with Logrus. Log rotation should be done by an +external program (like `logrotate(8)`) that can compress and delete old log +entries. It should not be a feature of the application-level logger. + +#### Tools + +| Tool | Description | +| ---- | ----------- | +|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.| +|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper arround Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) | + +#### Testing + +Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides: + +* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook +* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any): + +```go +logger, hook := NewNullLogger() +logger.Error("Hello error") + +assert.Equal(1, len(hook.Entries)) +assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level) +assert.Equal("Hello error", hook.LastEntry().Message) + +hook.Reset() +assert.Nil(hook.LastEntry()) +``` + +#### Fatal handlers + +Logrus can register one or more functions that will be called when any `fatal` +level message is logged. The registered handlers will be executed before +logrus performs a `os.Exit(1)`. This behavior may be helpful if callers need +to gracefully shutdown. Unlike a `panic("Something went wrong...")` call which can be intercepted with a deferred `recover` a call to `os.Exit(1)` can not be intercepted. + +``` +... +handler := func() { + // gracefully shutdown something... +} +logrus.RegisterExitHandler(handler) +... +``` + +#### Thread safty + +By default Logger is protected by mutex for concurrent writes, this mutex is invoked when calling hooks and writing logs. +If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking. 
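+
+As a minimal sketch (assuming the program registers no hooks and only ever
+writes through this logger from a single goroutine):
+
+```go
+logger := logrus.New()
+// Safe only because nothing else uses this logger concurrently.
+logger.SetNoLock()
+logger.Info("written without taking the logger mutex")
+```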
+ +Situation when locking is not needed includes: + +* You have no hooks registered, or hooks calling is already thread-safe. + +* Writing to logger.Out is already thread-safe, for example: + + 1) logger.Out is protected by locks. + + 2) logger.Out is a os.File handler opened with `O_APPEND` flag, and every write is smaller than 4k. (This allow multi-thread/multi-process writing) + + (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/) diff --git a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt new file mode 100644 index 00000000..5f14d116 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt @@ -0,0 +1,3 @@ +AWS SDK for Go +Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Copyright 2014-2015 Stripe, Inc. diff --git a/vendor/github.com/aws/aws-sdk-go/README.md b/vendor/github.com/aws/aws-sdk-go/README.md new file mode 100644 index 00000000..df806f40 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/README.md @@ -0,0 +1,121 @@ +# AWS SDK for Go + + +[![API Reference](http://img.shields.io/badge/api-reference-blue.svg)](http://docs.aws.amazon.com/sdk-for-go/api) +[![Join the chat at https://gitter.im/aws/aws-sdk-go](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/aws/aws-sdk-go?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +[![Build Status](https://img.shields.io/travis/aws/aws-sdk-go.svg)](https://travis-ci.org/aws/aws-sdk-go) +[![Apache V2 License](http://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt) + + +aws-sdk-go is the official AWS SDK for the Go programming language. + +Checkout our [release notes](https://github.com/aws/aws-sdk-go/releases) for information about the latest bug fixes, updates, and features added to the SDK. + +## Installing + +If you are using Go 1.5 with the `GO15VENDOREXPERIMENT=1` vendoring flag, or 1.6 and higher you can use the following command to retrieve the SDK. The SDK's non-testing dependencies will be included and are vendored in the `vendor` folder. + + go get -u github.com/aws/aws-sdk-go + +Otherwise if your Go environment does not have vendoring support enabled, or you do not want to include the vendored SDK's dependencies you can use the following command to retrieve the SDK and its non-testing dependencies using `go get`. + + go get -u github.com/aws/aws-sdk-go/aws/... + go get -u github.com/aws/aws-sdk-go/service/... + +If you're looking to retrieve just the SDK without any dependencies use the following command. + + go get -d github.com/aws/aws-sdk-go/ + +These two processes will still include the `vendor` folder and it should be deleted if its not going to be used by your environment. 
+ + rm -rf $GOPATH/src/github.com/aws/aws-sdk-go/vendor + +## Reference Documentation +[`Getting Started Guide`](https://aws.amazon.com/sdk-for-go/) - This document is a general introduction how to configure and make requests with the SDK. If this is your first time using the SDK, this documentation and the API documentation will help you get started. This document focuses on the syntax and behavior of the SDK. The [Service Developer Guide](https://aws.amazon.com/documentation/) will help you get started using specific AWS services. + +[`SDK API Reference Documentation`](https://docs.aws.amazon.com/sdk-for-go/api/) - Use this document to look up all API operation input and output parameters for AWS services supported by the SDK. The API reference also includes documentation of the SDK, and examples how to using the SDK, service client API operations, and API operation require parameters. + +[`Service Developer Guide`](https://aws.amazon.com/documentation/) - Use this documentation to learn how to interface with an AWS service. These are great guides both, if you're getting started with a service, or looking for more information on a service. You should not need this document for coding, though in some cases, services may supply helpful samples that you might want to look out for. + +[`SDK Examples`](https://github.com/aws/aws-sdk-go/tree/master/example) - Included in the SDK's repo are a several hand crafted examples using the SDK features and AWS services. + +## Configuring Credentials + +Before using the SDK, ensure that you've configured credentials. The best +way to configure credentials on a development machine is to use the +`~/.aws/credentials` file, which might look like: + +``` +[default] +aws_access_key_id = AKID1234567890 +aws_secret_access_key = MY-SECRET-KEY +``` + +You can learn more about the credentials file from this +[blog post](http://blogs.aws.amazon.com/security/post/Tx3D6U6WSFGOK2H/A-New-and-Standardized-Way-to-Manage-Credentials-in-the-AWS-SDKs). + +Alternatively, you can set the following environment variables: + +``` +AWS_ACCESS_KEY_ID=AKID1234567890 +AWS_SECRET_ACCESS_KEY=MY-SECRET-KEY +``` + +### AWS shared config file (`~/.aws/config`) +The AWS SDK for Go added support the shared config file in release [v1.3.0](https://github.com/aws/aws-sdk-go/releases/tag/v1.3.0). You can opt into enabling support for the shared config by setting the environment variable `AWS_SDK_LOAD_CONFIG` to a truthy value. See the [Session](https://github.com/aws/aws-sdk-go/wiki/sessions) wiki for more information about this feature. + +## Using the Go SDK + +To use a service in the SDK, create a service variable by calling the `New()` +function. Once you have a service client, you can call API operations which each +return response data and a possible error. 
+ +To list a set of instance IDs from EC2, you could run: + +```go +package main + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/ec2" +) + +func main() { + sess, err := session.NewSession() + if err != nil { + panic(err) + } + + // Create an EC2 service object in the "us-west-2" region + // Note that you can also configure your region globally by + // exporting the AWS_REGION environment variable + svc := ec2.New(sess, &aws.Config{Region: aws.String("us-west-2")}) + + // Call the DescribeInstances Operation + resp, err := svc.DescribeInstances(nil) + if err != nil { + panic(err) + } + + // resp has all of the response data, pull out instance IDs: + fmt.Println("> Number of reservation sets: ", len(resp.Reservations)) + for idx, res := range resp.Reservations { + fmt.Println(" > Number of instances: ", len(res.Instances)) + for _, inst := range resp.Reservations[idx].Instances { + fmt.Println(" - Instance ID: ", *inst.InstanceId) + } + } +} +``` + +You can find more information and operations in our +[API documentation](http://docs.aws.amazon.com/sdk-for-go/api/). + +## License + +This SDK is distributed under the +[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0), +see LICENSE.txt and NOTICE.txt for more information. diff --git a/vendor/github.com/aws/aws-sdk-go/private/README.md b/vendor/github.com/aws/aws-sdk-go/private/README.md new file mode 100644 index 00000000..5bdb4c50 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/README.md @@ -0,0 +1,4 @@ +## AWS SDK for Go Private packages ## +`private` is a collection of packages used internally by the SDK, and is subject to have breaking changes. This package is not `internal` so that if you really need to use its functionality, and understand breaking changes will be made, you are able to. + +These packages will be refactored in the future so that the API generator and model parsers are exposed cleanly on their own. Making it easier for you to generate your own code based on the API models. diff --git a/vendor/github.com/bshuster-repo/logrus-logstash-hook/README.md b/vendor/github.com/bshuster-repo/logrus-logstash-hook/README.md new file mode 100644 index 00000000..0f281b50 --- /dev/null +++ b/vendor/github.com/bshuster-repo/logrus-logstash-hook/README.md @@ -0,0 +1,95 @@ +# Logstash hook for logrus :walrus: [![Build Status](https://travis-ci.org/bshuster-repo/logrus-logstash-hook.svg?branch=master)](https://travis-ci.org/bshuster-repo/logrus-logstash-hook) +Use this hook to send the logs to [Logstash](https://www.elastic.co/products/logstash) over both UDP and TCP. + +## Usage + +```go +package main + +import ( + "github.com/Sirupsen/logrus" + "github.com/bshuster-repo/logrus-logstash-hook" +) + +func main() { + log := logrus.New() + hook, err := logrus_logstash.NewHook("tcp", "172.17.0.2:9999", "myappName") + + if err != nil { + log.Fatal(err) + } + log.Hooks.Add(hook) + ctx := log.WithFields(logrus.Fields{ + "method": "main", + }) + ... + ctx.Info("Hello World!") +} +``` + +This is how it will look like: + +```ruby +{ + "@timestamp" => "2016-02-29T16:57:23.000Z", + "@version" => "1", + "level" => "info", + "message" => "Hello World!", + "method" => "main", + "host" => "172.17.0.1", + "port" => 45199, + "type" => "myappName" +} +``` +## Hook Fields +Fields can be added to the hook, which will always be in the log context. 
+This can be done when creating the hook:
+
+```go
+// Note: os.Hostname() returns (string, error), so grab the value first.
+hostname, _ := os.Hostname()
+
+hook, err := logrus_logstash.NewHookWithFields("tcp", "172.17.0.2:9999", "myappName", logrus.Fields{
+    "hostname":    hostname,
+    "serviceName": "myServiceName",
+})
+```
+
+Or afterwards:
+
+```go
+hostname, _ := os.Hostname()
+
+hook.WithFields(logrus.Fields{
+    "hostname":    hostname,
+    "serviceName": "myServiceName",
+})
+```
+This allows you to set up the hook so logging is available immediately, and to add important fields as they become available.
+
+Single fields can be added/updated using `WithField`:
+
+```go
+hook.WithField("status", "running")
+```
+
+## Field prefix
+
+The hook allows you to send logging to logstash while also retaining the default std output in text format.
+However, to keep this console output readable, some fields may need to be omitted from the default non-hooked log output.
+Each hook can be configured with a prefix used to identify fields which are only to be logged to the logstash connection.
+For example, if you don't want to see the hostname and serviceName on each log line in the console output, you can add a prefix:
+
+```go
+hostname, _ := os.Hostname()
+
+hook, err := logrus_logstash.NewHookWithFields("tcp", "172.17.0.2:9999", "myappName", logrus.Fields{
+    "_hostname":    hostname,
+    "_serviceName": "myServiceName",
+})
+...
+hook.WithPrefix("_")
+```
+
+There are also constructors available which allow you to specify the prefix from the start.
+The std output will not have the `_hostname` and `_serviceName` fields, but the logstash output will, with the prefix dropped from the field names.
diff --git a/vendor/github.com/bugsnag/bugsnag-go/LICENSE.txt b/vendor/github.com/bugsnag/bugsnag-go/LICENSE.txt
new file mode 100644
index 00000000..3cb0ec0f
--- /dev/null
+++ b/vendor/github.com/bugsnag/bugsnag-go/LICENSE.txt
@@ -0,0 +1,20 @@
+Copyright (c) 2014 Bugsnag
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/bugsnag/bugsnag-go/README.md b/vendor/github.com/bugsnag/bugsnag-go/README.md
new file mode 100644
index 00000000..b5432293
--- /dev/null
+++ b/vendor/github.com/bugsnag/bugsnag-go/README.md
@@ -0,0 +1,489 @@
+Bugsnag Notifier for Golang
+===========================
+
+The Bugsnag Notifier for Golang gives you instant notification of panics, or
+unexpected errors, in your golang app. Any unhandled panics will trigger a
+notification to be sent to your Bugsnag project.
+
+[Bugsnag](http://bugsnag.com) captures errors in real time from your web,
+mobile and desktop applications, helping you to understand and resolve them
+as fast as possible. [Create a free account](http://bugsnag.com) to start
+capturing exceptions from your applications.
+
+## How to Install
+
+1. Download the code
+
+   ```shell
+   go get github.com/bugsnag/bugsnag-go
+   ```
+
+### Using with net/http apps
+
+For a golang app based on [net/http](https://godoc.org/net/http), integrating
+Bugsnag takes two steps. You should also use these instructions if you're using
+the [gorilla toolkit](http://www.gorillatoolkit.org/), or the
+[pat](https://github.com/bmizerany/pat/) muxer.
+
+1. Configure bugsnag at the start of your `main()` function:
+
+   ```go
+   import "github.com/bugsnag/bugsnag-go"
+
+   func main() {
+       bugsnag.Configure(bugsnag.Configuration{
+           APIKey:       "YOUR_API_KEY_HERE",
+           ReleaseStage: "production",
+           // more configuration options
+       })
+
+       // rest of your program.
+   }
+   ```
+
+2. Wrap your server in a [bugsnag.Handler](https://godoc.org/github.com/bugsnag/bugsnag-go/#Handler)
+
+   ```go
+   // a. If you're using the builtin http mux, you can just pass
+   // bugsnag.Handler(nil) to http.ListenAndServe
+   http.ListenAndServe(":8080", bugsnag.Handler(nil))
+
+   // b. If you're creating a server manually yourself, you can set
+   // its handlers the same way
+   srv := http.Server{
+       Handler: bugsnag.Handler(nil),
+   }
+
+   // c. If you're not using the builtin http mux, wrap your own handler
+   // (though make sure that it doesn't already catch panics)
+   http.ListenAndServe(":8080", bugsnag.Handler(handler))
+   ```
+
+### Using with Revel apps
+
+There are two steps to get panic handling in [revel](https://revel.github.io) apps.
+
+1. Add the `bugsnagrevel.Filter` immediately after the `revel.PanicFilter` in `app/init.go`:
+
+   ```go
+   import "github.com/bugsnag/bugsnag-go/revel"
+
+   revel.Filters = []revel.Filter{
+       revel.PanicFilter,
+       bugsnagrevel.Filter,
+       // ...
+   }
+   ```
+
+2. Set bugsnag.apikey in the top section of `conf/app.conf`.
+
+   ```
+   module.static=github.com/revel/revel/modules/static
+
+   bugsnag.apikey=YOUR_API_KEY_HERE
+
+   [dev]
+   ```
+
+### Using with Google App Engine
+
+1. Configure bugsnag at the start of your `init()` function:
+
+   ```go
+   import "github.com/bugsnag/bugsnag-go"
+
+   func init() {
+       bugsnag.Configure(bugsnag.Configuration{
+           APIKey: "YOUR_API_KEY_HERE",
+       })
+
+       // ...
+   }
+   ```
+
+2. Wrap *every* http.Handler or http.HandlerFunc with Bugsnag:
+
+   ```go
+   // a. If you're using HandlerFuncs
+   http.HandleFunc("/", bugsnag.HandlerFunc(
+       func(w http.ResponseWriter, r *http.Request) {
+           // ...
+       }))
+
+   // b. If you're using Handlers
+   http.Handle("/", bugsnag.Handler(myHttpHandler))
+   ```
+
+3. In order to use Bugsnag, you must provide the current
+[`appengine.Context`](https://developers.google.com/appengine/docs/go/reference#Context), or the
+current `*http.Request`, as rawData. The easiest way to do this is to create a new notifier.
+
+   ```go
+   c := appengine.NewContext(r)
+   notifier := bugsnag.New(c)
+
+   if err != nil {
+       notifier.Notify(err)
+   }
+
+   go func() {
+       defer notifier.Recover()
+
+       // ...
+   }()
+   ```
+
+
+## Notifying Bugsnag manually
+
+Bugsnag will automatically handle any panics that crash your program and notify
+you of them. If you've integrated with `revel` or `net/http`, then you'll also
+be notified of any panics that happen while processing a request.
+
+Sometimes, however, it's useful to manually notify Bugsnag of a problem. To do this,
+call [`bugsnag.Notify()`](https://godoc.org/github.com/bugsnag/bugsnag-go/#Notify):
+
+```go
+if err != nil {
+    bugsnag.Notify(err)
+}
+```
+
+### Manual panic handling
+
+To avoid a panic in a goroutine from crashing your entire app, you can use
+[`bugsnag.Recover()`](https://godoc.org/github.com/bugsnag/bugsnag-go/#Recover)
+to stop a panic from unwinding the stack any further. When `Recover()` is hit,
+it will send any current panic to Bugsnag and then stop panicking. This is
+most useful at the start of a goroutine:
+
+```go
+go func() {
+    defer bugsnag.Recover()
+
+    // ...
+}()
+```
+
+Alternatively, you can use
+[`bugsnag.AutoNotify()`](https://godoc.org/github.com/bugsnag/bugsnag-go/#AutoNotify)
+to notify Bugsnag of a panic while letting the program continue to panic. This
+is useful if you're using a framework that already has some handling of panics
+and you are retrofitting Bugsnag support.
+
+```go
+defer bugsnag.AutoNotify()
+```
+
+## Sending Custom Data
+
+Most functions in the Bugsnag API, including `bugsnag.Notify()`,
+`bugsnag.Recover()`, `bugsnag.AutoNotify()`, and `bugsnag.Handler()`, let you
+attach data to the notifications that they send. To do this, you pass in rawData,
+which can be any of the supported types listed below. To add support for more
+types of rawData, see [OnBeforeNotify](#custom-data-with-onbeforenotify).
+
+### Custom MetaData
+
+Custom metaData appears as tabs on Bugsnag.com. You can set it by passing
+a [`bugsnag.MetaData`](https://godoc.org/github.com/bugsnag/bugsnag-go/#MetaData)
+object as rawData.
+
+```go
+bugsnag.Notify(err,
+    bugsnag.MetaData{
+        "Account": {
+            "Name":   Account.Name,
+            "Paying": Account.Plan.Premium,
+        },
+    })
+```
+
+### Request data
+
+Bugsnag can extract interesting data from
+[`*http.Request`](https://godoc.org/net/http/#Request) objects, and
+[`*revel.Controller`](https://godoc.org/github.com/revel/revel/#Controller)
+objects. These are automatically passed in when handling panics, and you can
+pass them yourself.
+
+```go
+func handler(w http.ResponseWriter, r *http.Request) {
+    // ...
+    bugsnag.Notify(err, r)
+}
+```
+
+### User data
+
+User data is searchable, and the `Id` powers the count of users affected. You
+can set which user an error affects by passing a
+[`bugsnag.User`](https://godoc.org/github.com/bugsnag/bugsnag-go/#User) object as
+rawData.
+
+```go
+bugsnag.Notify(err,
+    bugsnag.User{Id: "1234", Name: "Conrad", Email: "me@cirw.in"})
+```
+
+### Context
+
+The context shows up prominently in the list view so that you can get an idea
+of where a problem occurred. You can set it by passing a
+[`bugsnag.Context`](https://godoc.org/github.com/bugsnag/bugsnag-go/#Context)
+object as rawData.
+
+```go
+bugsnag.Notify(err, bugsnag.Context{"backgroundJob"})
+```
+
+### Severity
+
+Bugsnag supports three severities: `SeverityError`, `SeverityWarning`, and `SeverityInfo`.
+You can set the severity of an error by passing one of these objects as rawData.
+
+```go
+bugsnag.Notify(err, bugsnag.SeverityInfo)
+```
+
+## Configuration
+
+You must call `bugsnag.Configure()` at the start of your program to use Bugsnag. You pass it
+a [`bugsnag.Configuration`](https://godoc.org/github.com/bugsnag/bugsnag-go/#Configuration) object
+containing any of the following values.
+
+### APIKey
+
+The Bugsnag API key can be found on your [Bugsnag dashboard](https://bugsnag.com) under "Settings".
+
+```go
+bugsnag.Configure(bugsnag.Configuration{
+    APIKey: "YOUR_API_KEY_HERE",
+})
+```
+
+### Endpoint
+
+The Bugsnag endpoint defaults to `https://notify.bugsnag.com/`. If you're using Bugsnag Enterprise,
+you should set this to the endpoint of your local instance.
+
+```go
+bugsnag.Configure(bugsnag.Configuration{
+    Endpoint: "http://bugsnag.internal:49000/",
+})
+```
+
+### ReleaseStage
+
+The ReleaseStage tracks where your app is deployed. You should set this to `production`, `staging`,
+`development` or similar as appropriate.
+
+```go
+bugsnag.Configure(bugsnag.Configuration{
+    ReleaseStage: "development",
+})
+```
+
+### NotifyReleaseStages
+
+The list of ReleaseStages to notify in. By default Bugsnag will notify you in all release stages, but
+you can use this to silence development errors.
+
+```go
+bugsnag.Configure(bugsnag.Configuration{
+    NotifyReleaseStages: []string{"production", "staging"},
+})
+```
+
+### AppVersion
+
+If you use a versioning scheme for deploys of your app, Bugsnag can use the `AppVersion` to only
+re-open errors if they occur in a later version of the app.
+
+```go
+bugsnag.Configure(bugsnag.Configuration{
+    AppVersion: "1.2.3",
+})
+```
+
+### Hostname
+
+The hostname is used to track where exceptions are coming from in the Bugsnag dashboard. The
+default value is obtained from `os.Hostname()`, so you won't often need to change this.
+
+```go
+bugsnag.Configure(bugsnag.Configuration{
+    Hostname: "go1",
+})
+```
+
+### ProjectPackages
+
+In order to determine where a crash happens, Bugsnag needs to know which packages you consider to
+be part of your app (as opposed to a library). By default this is set to `[]string{"main*"}`. Strings
+are matched to package names using [`filepath.Match`](http://godoc.org/path/filepath#Match).
+
+```go
+bugsnag.Configure(bugsnag.Configuration{
+    ProjectPackages: []string{"main", "github.com/domain/myapp/*"},
+})
+```
+
+### ParamsFilters
+
+Sometimes sensitive data is accidentally included in Bugsnag MetaData. You can remove it by
+setting `ParamsFilters`. Any key in the `MetaData` that includes any string in the filters
+will be redacted. The default is `[]string{"password", "secret"}`, which prevents fields like
+`password`, `password_confirmation` and `secret_answer` from being sent.
+
+```go
+bugsnag.Configure(bugsnag.Configuration{
+    ParamsFilters: []string{"password", "secret"},
+})
+```
+
+### Logger
+
+The Logger to write to in case of an error inside Bugsnag. This defaults to the global logger.
+
+```go
+bugsnag.Configure(bugsnag.Configuration{
+    Logger: app.Logger,
+})
+```
+
+### PanicHandler
+
+The first time Bugsnag is configured, it wraps the running program in a panic
+handler using [panicwrap](http://godoc.org/github.com/ConradIrwin/panicwrap). This
+forks a sub-process which monitors unhandled panics. To prevent this, set
+`PanicHandler` to `func() {}` the first time you call
+`bugsnag.Configure`. This will prevent Bugsnag from being able to notify you about
+unhandled panics.
+
+```go
+bugsnag.Configure(bugsnag.Configuration{
+    PanicHandler: func() {},
+})
+```
+
+### Synchronous
+
+Bugsnag usually starts a new goroutine before sending notifications. This means
+that notifications can be lost if you call `bugsnag.Notify` and then immediately
+call `os.Exit`. To avoid this problem, set Bugsnag to Synchronous (or just `panic()`
+instead ;).
+
+```go
+bugsnag.Configure(bugsnag.Configuration{
+    Synchronous: true,
+})
+```
+
+Or just for one error:
+
+```go
+bugsnag.Notify(err, bugsnag.Configuration{Synchronous: true})
+```
+
+### Transport
+
+The transport configures how Bugsnag makes http requests. By default we use
+[`http.DefaultTransport`](http://godoc.org/net/http#RoundTripper), which handles
+HTTP proxies automatically using the `$HTTP_PROXY` environment variable.
+
+```go
+bugsnag.Configure(bugsnag.Configuration{
+    Transport: http.DefaultTransport,
+})
+```
+
+## Custom data with OnBeforeNotify
+
+While it's nice that you can pass `MetaData` directly into `bugsnag.Notify`,
+`bugsnag.AutoNotify`, and `bugsnag.Recover`, this can be a bit cumbersome and
+inefficient: you're constructing the meta-data whether or not it will actually
+be used. A better idea is to pass raw data in to these functions, and add an
+`OnBeforeNotify` filter that converts it into `MetaData`.
+
+For example, let's say our system processes jobs:
+
+```go
+type Job struct {
+    Retry     bool
+    UserId    string
+    UserEmail string
+    Name      string
+    Params    map[string]string
+}
+```
+
+You can pass a job directly into `bugsnag.Notify`:
+
+```go
+bugsnag.Notify(err, job)
+```
+
+And then add a filter to extract information from that job and attach it to the
+Bugsnag event:
+
+```go
+bugsnag.OnBeforeNotify(
+    func(event *bugsnag.Event, config *bugsnag.Configuration) error {
+
+        // Search all the RawData for any *Job pointers that were passed in
+        // to bugsnag.Notify() and friends.
+        for _, datum := range event.RawData {
+            if job, ok := datum.(*Job); ok {
+                // don't notify bugsnag about errors in retries
+                if job.Retry {
+                    return fmt.Errorf("not notifying about retried jobs")
+                }
+
+                // add the job as a tab on Bugsnag.com
+                event.MetaData.AddStruct("Job", job)
+
+                // set the user correctly
+                event.User = &bugsnag.User{Id: job.UserId, Email: job.UserEmail}
+            }
+        }
+
+        // continue notifying as normal
+        return nil
+    })
+```
+
+## Advanced Usage
+
+If you want to have multiple different configurations around in one program,
+you can use `bugsnag.New()` to create multiple independent instances of
+Bugsnag. You can use these without calling `bugsnag.Configure()`, but bear in
+mind that until you call `bugsnag.Configure()` unhandled panics will not be
+sent to Bugsnag.
+
+```go
+notifier := bugsnag.New(bugsnag.Configuration{
+    APIKey: "YOUR_OTHER_API_KEY",
+})
+```
+
+In fact, any place that lets you pass in `rawData` also allows you to pass in
+configuration. For example, to send http errors to one bugsnag project, you
+could do:
+
+```go
+bugsnag.Handler(nil, bugsnag.Configuration{APIKey: "YOUR_OTHER_API_KEY"})
+```
+
+### GroupingHash
+
+If you need to override Bugsnag's grouping algorithm, you can set the
+`GroupingHash` in an `OnBeforeNotify`:
+
+```go
+bugsnag.OnBeforeNotify(
+    func(event *bugsnag.Event, config *bugsnag.Configuration) error {
+        event.GroupingHash = calculateGroupingHash(event)
+        return nil
+    })
+```
diff --git a/vendor/github.com/bugsnag/bugsnag-go/errors/README.md b/vendor/github.com/bugsnag/bugsnag-go/errors/README.md
new file mode 100644
index 00000000..8d8e097a
--- /dev/null
+++ b/vendor/github.com/bugsnag/bugsnag-go/errors/README.md
@@ -0,0 +1,6 @@
+Adds stacktraces to errors in golang.
+
+This was made to help build the Bugsnag notifier but can be used standalone if
+you'd like to have stacktraces on errors.
+
+See [Godoc](https://godoc.org/github.com/bugsnag/bugsnag-go/errors) for the API docs.
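+
+As a quick standalone sketch (the `New(err, skip)` signature with its
+stack-frame `skip` argument, and the `ErrorStack()` method, are assumptions
+based on this package's go-errors lineage; check the Godoc before relying on
+them):
+
+```go
+package main
+
+import (
+    "fmt"
+
+    "github.com/bugsnag/bugsnag-go/errors"
+)
+
+func doWork() error {
+    return fmt.Errorf("boom")
+}
+
+func main() {
+    if err := doWork(); err != nil {
+        // Wrap the error, capturing a stack trace here (skipping 0 frames).
+        wrapped := errors.New(err, 0)
+        fmt.Println(wrapped.ErrorStack())
+    }
+}
+```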
diff --git a/vendor/github.com/bugsnag/panicwrap/README.md b/vendor/github.com/bugsnag/panicwrap/README.md
new file mode 100644
index 00000000..d0a59675
--- /dev/null
+++ b/vendor/github.com/bugsnag/panicwrap/README.md
@@ -0,0 +1,101 @@
+# panicwrap
+
+panicwrap is a Go library that re-executes a Go binary and monitors stderr
+output from the binary for a panic. When it finds a panic, it executes a
+user-defined handler function. Stdout, stderr, stdin, signals, and exit
+codes continue to work as normal, making the existence of panicwrap mostly
+invisible to the end user until a panic actually occurs.
+
+Since a panic is truly a bug in the program and is meant to crash the runtime,
+globally catching panics within Go applications is not supposed to be possible.
+Despite this, it is often useful to have a way to know when panics occur.
+panicwrap allows you to do something with these panics, such as writing them
+to a file, so that you can track when panics occur.
+
+panicwrap is ***not a panic recovery system***. Panics indicate serious
+problems with your application and _should_ crash the runtime. panicwrap
+is just meant as a way to monitor for panics. If you still think this is
+the worst idea ever, read the section below on why.
+
+## Features
+
+* **SIMPLE!**
+* Works with all Go applications on all platforms Go supports
+* Custom behavior when a panic occurs
+* Stdout, stderr, stdin, exit codes, and signals continue to work as
+  expected.
+
+## Usage
+
+Using panicwrap is simple. It behaves a lot like `fork`, if you know
+how that works. A basic example is shown below.
+
+Because it would be sad to panic while capturing a panic, it is recommended
+that the handler functions for panicwrap remain relatively simple and well
+tested. panicwrap itself contains many tests.
+
+```go
+package main
+
+import (
+    "fmt"
+    "github.com/mitchellh/panicwrap"
+    "os"
+)
+
+func main() {
+    exitStatus, err := panicwrap.BasicWrap(panicHandler)
+    if err != nil {
+        // Something went wrong setting up the panic wrapper. Unlikely,
+        // but possible.
+        panic(err)
+    }
+
+    // If exitStatus >= 0, then we're the parent process and the panicwrap
+    // re-executed ourselves and completed. Just exit with the proper status.
+    if exitStatus >= 0 {
+        os.Exit(exitStatus)
+    }
+
+    // Otherwise, exitStatus < 0 means we're the child. Continue executing as
+    // normal...
+
+    // Let's say we panic
+    panic("oh shucks")
+}
+
+func panicHandler(output string) {
+    // output contains the full output (including stack traces) of the
+    // panic. Put it in a file or something.
+    fmt.Printf("The child panicked:\n\n%s\n", output)
+    os.Exit(1)
+}
+```
+
+## How Does it Work?
+
+panicwrap works by re-executing the running program (retaining arguments,
+environment variables, etc.) and monitoring the stderr of the program.
+Since Go always outputs panics in a predictable way with a predictable
+exit code, panicwrap is able to reliably detect panics and allow the parent
+process to handle them.
+
+## WHY?! Panics should CRASH!
+
+Yes, panics _should_ crash. They are 100% always indicative of bugs.
+However, in some cases, such as user-facing programs (programs like
+[Packer](http://github.com/mitchellh/packer) or
+[Docker](http://github.com/dotcloud/docker)), it is up to the user to
+report such panics. This is unreliable, at best, and it would be better if the
+program could have a way to automatically report panics. panicwrap provides
+a way to do this.
+
+For backend applications, it is easier to detect crashes (since the application
+exits). However, it is still sometimes useful to log panics more intelligently.
+For example, at [HashiCorp](http://www.hashicorp.com),
+we use panicwrap to log panics to timestamped files with some additional
+data (configuration settings at the time, environment variables, etc.).
+
+The goal of panicwrap is _not_ to hide panics. It is instead to provide
+a clean mechanism for handling them before bubbling them up to the user
+and ultimately crashing.
diff --git a/vendor/github.com/denverdino/aliyungo/LICENSE.txt b/vendor/github.com/denverdino/aliyungo/LICENSE.txt
new file mode 100644
index 00000000..91829713
--- /dev/null
+++ b/vendor/github.com/denverdino/aliyungo/LICENSE.txt
@@ -0,0 +1,191 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        https://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+   END OF TERMS AND CONDITIONS
+
+   Copyright 2015-2015 Li Yi (denverdino@gmail.com).
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       https://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/denverdino/aliyungo/README.md b/vendor/github.com/denverdino/aliyungo/README.md
new file mode 100644
index 00000000..fd9da9b1
--- /dev/null
+++ b/vendor/github.com/denverdino/aliyungo/README.md
@@ -0,0 +1,143 @@
+# AliyunGo: Go SDK for Aliyun Services
+
+This is an unofficial Go SDK for Aliyun Services. Contributions are welcome.
+
+
+## Package Structure
+
+* ecs: [Elastic Compute Service](https://help.aliyun.com/document_detail/ecs/open-api/summary.html)
+* oss: [Open Storage Service](https://help.aliyun.com/document_detail/oss/api-reference/abstract.html)
+* slb: [Server Load Balancer](https://help.aliyun.com/document_detail/slb/api-reference/brief-introduction.html)
+* dns: [DNS](https://help.aliyun.com/document_detail/dns/api-reference/summary.html)
+* sls: [Logging Service](https://help.aliyun.com/document_detail/sls/api/overview.html)
+* ram: [Resource Access Management](https://help.aliyun.com/document_detail/ram/ram-api-reference/intro/intro.html)
+* rds: [Relational Database Service](https://help.aliyun.com/document_detail/26226.html)
+* cms: [Cloud Monitor Service](https://help.aliyun.com/document_detail/28615.html)
+* sts: [Security Token Service](https://help.aliyun.com/document_detail/28756.html)
+* dm: [Direct Mail](https://help.aliyun.com/document_detail/29414.html)
+* common: Common library of the Aliyun Go SDK
+* util: Utility helpers
+
+
+
+## Quick Start
+
+```go
+package main
+
+import (
+    "fmt"
+
+    "github.com/denverdino/aliyungo/ecs"
+)
+
+const ACCESS_KEY_ID = "<Your Access Key ID>"
+const ACCESS_KEY_SECRET = "<****>"
+
+func main() {
+    client := ecs.NewClient(ACCESS_KEY_ID, ACCESS_KEY_SECRET)
+    fmt.Print(client.DescribeRegions())
+}
+
+```
+
+## Documentation
+
+ * ECS: [https://godoc.org/github.com/denverdino/aliyungo/ecs](https://godoc.org/github.com/denverdino/aliyungo/ecs) [![GoDoc](https://godoc.org/github.com/denverdino/aliyungo/ecs?status.svg)](https://godoc.org/github.com/denverdino/aliyungo/ecs)
 * OSS: [https://godoc.org/github.com/denverdino/aliyungo/oss](https://godoc.org/github.com/denverdino/aliyungo/oss) [![GoDoc](https://godoc.org/github.com/denverdino/aliyungo/oss?status.svg)](https://godoc.org/github.com/denverdino/aliyungo/oss)
+ * SLB: [https://godoc.org/github.com/denverdino/aliyungo/slb](https://godoc.org/github.com/denverdino/aliyungo/slb) [![GoDoc](https://godoc.org/github.com/denverdino/aliyungo/slb?status.svg)](https://godoc.org/github.com/denverdino/aliyungo/slb)
+ * DNS: [https://godoc.org/github.com/denverdino/aliyungo/dns](https://godoc.org/github.com/denverdino/aliyungo/dns) [![GoDoc](https://godoc.org/github.com/denverdino/aliyungo/dns?status.svg)](https://godoc.org/github.com/denverdino/aliyungo/dns)
+ * SLS: [https://godoc.org/github.com/denverdino/aliyungo/sls](https://godoc.org/github.com/denverdino/aliyungo/sls) [![GoDoc](https://godoc.org/github.com/denverdino/aliyungo/sls?status.svg)](https://godoc.org/github.com/denverdino/aliyungo/sls)
+ * RAM: [https://godoc.org/github.com/denverdino/aliyungo/ram](https://godoc.org/github.com/denverdino/aliyungo/ram) [![GoDoc](https://godoc.org/github.com/denverdino/aliyungo/ram?status.svg)](https://godoc.org/github.com/denverdino/aliyungo/ram)
+ * RDS: [https://godoc.org/github.com/denverdino/aliyungo/rds](https://godoc.org/github.com/denverdino/aliyungo/rds) [![GoDoc](https://godoc.org/github.com/denverdino/aliyungo/rds?status.svg)](https://godoc.org/github.com/denverdino/aliyungo/rds)
+ * CMS: [https://godoc.org/github.com/denverdino/aliyungo/cms](https://godoc.org/github.com/denverdino/aliyungo/cms) [![GoDoc](https://godoc.org/github.com/denverdino/aliyungo/cms?status.svg)](https://godoc.org/github.com/denverdino/aliyungo/cms)
+ * STS: [https://godoc.org/github.com/denverdino/aliyungo/sts](https://godoc.org/github.com/denverdino/aliyungo/sts) [![GoDoc](https://godoc.org/github.com/denverdino/aliyungo/sts?status.svg)](https://godoc.org/github.com/denverdino/aliyungo/sts)
+ * DM: [https://godoc.org/github.com/denverdino/aliyungo/dm](https://godoc.org/github.com/denverdino/aliyungo/dm) [![GoDoc](https://godoc.org/github.com/denverdino/aliyungo/dm?status.svg)](https://godoc.org/github.com/denverdino/aliyungo/dm)
+
+## Build and Install
+
+go get:
+
+```sh
+go get github.com/denverdino/aliyungo
+```
+
+
+## Test ECS
+
+Modify "ecs/config_test.go"
+
+```sh
+	TestAccessKeyId = "MY_ACCESS_KEY_ID"
+	TestAccessKeySecret = "MY_ACCESS_KEY_SECRET"
+	TestInstanceId = "MY_INSTANCE_ID"
+	TestIAmRich = false
+```
+
+* TestAccessKeyId: the Access Key Id
+* TestAccessKeySecret: the Access Key Secret.
+* TestInstanceId: an existing instance id for testing. It will be stopped and restarted during testing.
+* TestIAmRich (optional): if set to true, the tests will create virtual machines and disks under your account. And you will pay the bill. :-)
+
+Then, under "ecs", run
+
+```sh
+go test
+```
+
+## Test OSS
+
+Modify "oss/config_test.go"
+
+```sh
+	TestAccessKeyId = "MY_ACCESS_KEY_ID"
+	TestAccessKeySecret = "MY_ACCESS_KEY_SECRET"
+	TestRegion = oss.Beijing
+	TestBucket = "denverdino"
+```
+
+* TestAccessKeyId: the Access Key Id
+* TestAccessKeySecret: the Access Key Secret.
+* TestRegion: the OSS region for testing
+* TestBucket: the bucket name for testing
+
+
+Then, under "oss", run
+
+```sh
+go test
+```
+
+## Contributors
+
+ * Li Yi (denverdino@gmail.com)
+ * tgic (farmer1992@gmail.com)
+ * Yu Zhou (oscarrr110@gmail.com)
+ * Yufei Zhang
+ * linuxlikerqq
+ * Changhai Yan (changhai.ych@alibaba-inc.com)
+ * Jizhong Jiang (jiangjizhong@gmail.com)
+ * Kent Wang (pragkent@gmail.com)
+ * ringtail (zhongwei.lzw@alibaba-inc.com)
+ * aiden0z (aiden0xz@gmail.com)
+ * jimmycmh
+ * menglingwei
+ * mingang.he (dustgle@gmail.com)
+ * chainone (chainone@gmail.com)
+ * johnzeng
+
+## License
+This project is licensed under the Apache License, Version 2.0. See [LICENSE](https://github.com/denverdino/aliyungo/blob/master/LICENSE.txt) for the full license text.
+
+
+## Related projects
+
+ * Aliyun ECS driver for Docker Machine: [Pull request](https://github.com/docker/machine/pull/1182)
+
+ * Aliyun OSS driver for Docker Registry V2: [Pull request](https://github.com/docker/distribution/pull/514)
+
+
+## References
+
+The Go API design of OSS refers to the implementation from [https://github.com/AdRoll/goamz](https://github.com/AdRoll)
diff --git a/vendor/github.com/docker/goamz/README.md b/vendor/github.com/docker/goamz/README.md
new file mode 100644
index 00000000..9d03b466
--- /dev/null
+++ b/vendor/github.com/docker/goamz/README.md
@@ -0,0 +1,68 @@
+# News
+We are putting together plans for future changes. We obviously depend on all of you to take part in the planning for the future of goamz and the execution of those plans. Other than the regular 'issues' and 'pull requests', please also have a look at TODO.md.
+
+It is inevitable that there will be backward-*in*compatible changes. Please subscribe to the google group to get all the news (it will only be used for announcements; all the technical discussions will happen on github).
+
+Google group: https://groups.google.com/forum/#!forum/goamz-announcements
+
+
+
+# GoAMZ
+
+[![Build Status](https://travis-ci.org/docker/goamz.png?branch=master)](https://travis-ci.org/docker/goamz)
+
+The _goamz_ package enables Go programs to interact with Amazon Web Services.
+
+This is a fork of the version [developed within Canonical](https://wiki.ubuntu.com/goamz) with additional functionality and services from [a number of contributors](https://github.com/docker/goamz/contributors)!
+
+The API of AWS is very comprehensive, though, and goamz doesn't even scratch the surface of it. That said, it's fairly well tested, and is a foundation on which further calls can easily be integrated. We'll continue extending the API as necessary - Pull Requests are _very_ welcome!
+
+The following packages are available at the moment:
+
+```
+github.com/docker/goamz/aws
+github.com/docker/goamz/cloudwatch
+github.com/docker/goamz/dynamodb
+github.com/docker/goamz/ec2
+github.com/docker/goamz/elb
+github.com/docker/goamz/iam
+github.com/docker/goamz/kinesis
+github.com/docker/goamz/s3
+github.com/docker/goamz/sqs
+github.com/docker/goamz/sns
+
+github.com/docker/goamz/exp/mturk
+github.com/docker/goamz/exp/sdb
+github.com/docker/goamz/exp/ses
+```
+
+Packages under `exp/` are still in an experimental or unfinished/unpolished state.
+
+## API documentation
+
+The API documentation is currently available at:
+
+[http://godoc.org/github.com/docker/goamz](http://godoc.org/github.com/docker/goamz)
+
+## How to build and install goamz
+
+Just use `go get` with any of the available packages. For example:
+
+* `$ go get github.com/docker/goamz/ec2`
+* `$ go get github.com/docker/goamz/s3`
+
+## Running tests
+
+To run tests, first install gocheck with:
+
+`$ go get launchpad.net/gocheck`
+
+Then run go test as usual:
+
+`$ go test github.com/docker/goamz/...`
+
+_Note:_ running all tests with the command `go test ./...` will currently fail as tests do not tear down their HTTP listeners.
+
+If you want to run integration tests (costs money), set up the EC2 environment variables as usual, and run:
+
+`$ gotest -i`
diff --git a/vendor/github.com/docker/libtrust/README.md b/vendor/github.com/docker/libtrust/README.md
new file mode 100644
index 00000000..8e7db381
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/README.md
@@ -0,0 +1,18 @@
+# libtrust
+
+Libtrust is a library for managing authentication and authorization using public key cryptography.
+
+Authentication is handled using the identity attached to the public key.
+Libtrust provides multiple methods to prove possession of the private key associated with an identity.
+ - TLS x509 certificates
+ - Signature verification
+ - Key Challenge
+
+Authorization and access control is managed through a distributed trust graph.
+Trust servers are used as the authorities of the trust graph and allow caching portions of the graph for faster access.
+
+## Copyright and license
+
+Code and documentation copyright 2014 Docker, Inc. Code released under the Apache 2.0 license.
+Docs released under Creative Commons.
+
diff --git a/vendor/github.com/garyburd/redigo/README.markdown b/vendor/github.com/garyburd/redigo/README.markdown
new file mode 100644
index 00000000..ab42c315
--- /dev/null
+++ b/vendor/github.com/garyburd/redigo/README.markdown
@@ -0,0 +1,44 @@
+Redigo
+======
+
+Redigo is a [Go](http://golang.org/) client for the [Redis](http://redis.io/) database.
+
+Features
+--------
+
+* A [Print-like](http://godoc.org/github.com/garyburd/redigo/redis#hdr-Executing_Commands) API with support for all Redis commands.
+* [Pipelining](http://godoc.org/github.com/garyburd/redigo/redis#hdr-Pipelining), including pipelined transactions.
+* [Publish/Subscribe](http://godoc.org/github.com/garyburd/redigo/redis#hdr-Publish_and_Subscribe).
+* [Connection pooling](http://godoc.org/github.com/garyburd/redigo/redis#Pool).
+* [Script helper type](http://godoc.org/github.com/garyburd/redigo/redis#Script) with optimistic use of EVALSHA.
+* [Helper functions](http://godoc.org/github.com/garyburd/redigo/redis#hdr-Reply_Helpers) for working with command replies.
+
+Documentation
+-------------
+
+- [API Reference](http://godoc.org/github.com/garyburd/redigo/redis)
+- [FAQ](https://github.com/garyburd/redigo/wiki/FAQ)
+
+Installation
+------------
+
+Install Redigo using the "go get" command:
+
+    go get github.com/garyburd/redigo/redis
+
+The Go distribution is Redigo's only dependency.
+
+Contributing
+------------
+
+Contributions are welcome.
+
+Before writing code, send mail to gary@beagledreams.com to discuss what you
+plan to do. This gives me a chance to validate the design, avoid duplication of
+effort and ensure that the changes fit the goals of the project. Do not start
+the discussion with a pull request.
+
+License
+-------
+
+Redigo is available under the [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.html).
diff --git a/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/go-ini/ini/README.md
new file mode 100644
index 00000000..b1536c8b
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/README.md
@@ -0,0 +1,709 @@
+INI [![Build Status](https://travis-ci.org/go-ini/ini.svg?branch=master)](https://travis-ci.org/go-ini/ini)
+===
+
+![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200)
+
+Package ini provides INI file read and write functionality in Go.
+
+[简体中文](README_ZH.md)
+
+## Features
+
+- Load multiple data sources (`[]byte` or file) with overwrites.
+- Read with recursive values.
+- Read with parent-child sections.
+- Read with auto-increment key names.
+- Read with multiple-line values.
+- Read with tons of helper methods.
+- Read and convert values to Go types.
+- Read and **WRITE** comments of sections and keys.
+- Manipulate sections, keys and comments with ease.
+- Keep sections and keys in order as you parse and save.
+
+## Installation
+
+To use a tagged revision:
+
+    go get gopkg.in/ini.v1
+
+To use with the latest changes:
+
+    go get github.com/go-ini/ini
+
+Add the `-u` flag to update it in the future.
+
+### Testing
+
+If you want to test on your machine, please add the `-t` flag:
+
+    go get -t gopkg.in/ini.v1
+
+Add the `-u` flag to update it in the future.
+
+## Getting Started
+
+### Loading from data sources
+
+A **Data Source** is either raw data of type `[]byte` or a file name of type `string`, and you can load **as many data sources as you want**. Passing any other type will simply return an error.
+
+```go
+cfg, err := ini.Load([]byte("raw data"), "filename")
+```
+
+Or start with an empty object:
+
+```go
+cfg := ini.Empty()
+```
+
+If you cannot decide how many data sources to load at the beginning, you can still **Append()** them later.
+
+```go
+err := cfg.Append("other file", []byte("other raw data"))
+```
+
+If you have a list of files, some of which may not be available at the time, and you don't know exactly which ones, you can use `LooseLoad` to ignore nonexistent files without returning an error.
+
+```go
+cfg, err := ini.LooseLoad("filename", "filename_404")
+```
+
+The cool thing is that whenever such a file becomes available while you're calling the `Reload` method, it will be loaded as usual.
+
+#### Ignoring the case of key names
+
+When you do not care about the case of section and key names, you can use `InsensitiveLoad` to force all names to lowercase while parsing.
+
+```go
+cfg, err := ini.InsensitiveLoad("filename")
+//...
+
+// sec1 and sec2 are exactly the same section object
+sec1, err := cfg.GetSection("Section")
+sec2, err := cfg.GetSection("SecTIOn")
+
+// key1 and key2 are exactly the same key object
+key1, err := cfg.GetKey("Key")
+key2, err := cfg.GetKey("KeY")
+```
+
+#### MySQL-like boolean keys
+
+MySQL's configuration allows a key without a value, as follows:
+
+```ini
+[mysqld]
+...
+skip-host-cache
+skip-name-resolve
+```
+
+By default, this is considered a missing value. But if you know you're going to deal with such cases, you can assign advanced load options:
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{AllowBooleanKeys: true}, "my.cnf")
+```
+
+The value of such keys is always `true`, and when you save to a file, they will be kept in the same format as they were read.
+
+### Working with sections
+
+To get a section, you would need to:
+
+```go
+section, err := cfg.GetSection("section name")
+```
+
+As a shortcut for the default section, just give an empty string as the name:
+
+```go
+section, err := cfg.GetSection("")
+```
+
+When you're pretty sure the section exists, the following code can make your life easier:
+
+```go
+section := cfg.Section("")
+```
+
+What happens when the section somehow does not exist? Don't panic; it automatically creates and returns a new section to you.
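+
+For example, a small sketch (the section and key names here are made up for
+illustration):
+
+```go
+// "stats" need not exist in the file; it is created on first access.
+sec := cfg.Section("stats")
+sec.Key("enabled").SetValue("true")
+```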
+ +To create a new section: + +```go +err := cfg.NewSection("new section") +``` + +To get a list of sections or section names: + +```go +sections := cfg.Sections() +names := cfg.SectionStrings() +``` + +### Working with keys + +To get a key under a section: + +```go +key, err := cfg.Section("").GetKey("key name") +``` + +Same rule applies to key operations: + +```go +key := cfg.Section("").Key("key name") +``` + +To check if a key exists: + +```go +yes := cfg.Section("").HasKey("key name") +``` + +To create a new key: + +```go +err := cfg.Section("").NewKey("name", "value") +``` + +To get a list of keys or key names: + +```go +keys := cfg.Section("").Keys() +names := cfg.Section("").KeyStrings() +``` + +To get a clone hash of keys and corresponding values: + +```go +hash := cfg.Section("").KeysHash() +``` + +### Working with values + +To get a string value: + +```go +val := cfg.Section("").Key("key name").String() +``` + +To validate key value on the fly: + +```go +val := cfg.Section("").Key("key name").Validate(func(in string) string { + if len(in) == 0 { + return "default" + } + return in +}) +``` + +If you do not want any auto-transformation (such as recursive read) for the values, you can get raw value directly (this way you get much better performance): + +```go +val := cfg.Section("").Key("key name").Value() +``` + +To check if raw value exists: + +```go +yes := cfg.Section("").HasValue("test value") +``` + +To get value with types: + +```go +// For boolean values: +// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On +// false when value is: 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off +v, err = cfg.Section("").Key("BOOL").Bool() +v, err = cfg.Section("").Key("FLOAT64").Float64() +v, err = cfg.Section("").Key("INT").Int() +v, err = cfg.Section("").Key("INT64").Int64() +v, err = cfg.Section("").Key("UINT").Uint() +v, err = cfg.Section("").Key("UINT64").Uint64() +v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339) +v, err = cfg.Section("").Key("TIME").Time() // RFC3339 + +v = cfg.Section("").Key("BOOL").MustBool() +v = cfg.Section("").Key("FLOAT64").MustFloat64() +v = cfg.Section("").Key("INT").MustInt() +v = cfg.Section("").Key("INT64").MustInt64() +v = cfg.Section("").Key("UINT").MustUint() +v = cfg.Section("").Key("UINT64").MustUint64() +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339) +v = cfg.Section("").Key("TIME").MustTime() // RFC3339 + +// Methods start with Must also accept one argument for default value +// when key not found or fail to parse value to given type. +// Except method MustString, which you have to pass a default value. + +v = cfg.Section("").Key("String").MustString("default") +v = cfg.Section("").Key("BOOL").MustBool(true) +v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25) +v = cfg.Section("").Key("INT").MustInt(10) +v = cfg.Section("").Key("INT64").MustInt64(99) +v = cfg.Section("").Key("UINT").MustUint(3) +v = cfg.Section("").Key("UINT64").MustUint64(6) +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now()) +v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339 +``` + +What if my value is three-line long? + +```ini +[advance] +ADDRESS = """404 road, +NotFound, State, 5000 +Earth""" +``` + +Not a problem! + +```go +cfg.Section("advance").Key("ADDRESS").String() + +/* --- start --- +404 road, +NotFound, State, 5000 +Earth +------ end --- */ +``` + +That's cool, how about continuation lines? + +```ini +[advance] +two_lines = how about \ + continuation lines? 
+lots_of_lines = 1 \ + 2 \ + 3 \ + 4 +``` + +Piece of cake! + +```go +cfg.Section("advance").Key("two_lines").String() // how about continuation lines? +cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 +``` + +Well, I hate continuation lines, how do I disable that? + +```go +cfg, err := ini.LoadSources(ini.LoadOptions{ + IgnoreContinuation: true, +}, "filename") +``` + +Holy crap! + +Note that single quotes around values will be stripped: + +```ini +foo = "some value" // foo: some value +bar = 'some value' // bar: some value +``` + +That's all? Hmm, no. + +#### Helper methods of working with values + +To get value with given candidates: + +```go +v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"}) +v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75}) +v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30}) +v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30}) +v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9}) +v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9}) +v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3}) +v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339 +``` + +Default value will be presented if value of key is not in candidates you given, and default value does not need be one of candidates. + +To validate value in a given range: + +```go +vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2) +vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20) +vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20) +vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9) +vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9) +vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime) +vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 +``` + +##### Auto-split values into a slice + +To use zero value of type for invalid inputs: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0] +vals = cfg.Section("").Key("STRINGS").Strings(",") +vals = cfg.Section("").Key("FLOAT64S").Float64s(",") +vals = cfg.Section("").Key("INTS").Ints(",") +vals = cfg.Section("").Key("INT64S").Int64s(",") +vals = cfg.Section("").Key("UINTS").Uints(",") +vals = cfg.Section("").Key("UINT64S").Uint64s(",") +vals = cfg.Section("").Key("TIMES").Times(",") +``` + +To exclude invalid values out of result slice: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [2.2] +vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",") +vals = cfg.Section("").Key("INTS").ValidInts(",") +vals = cfg.Section("").Key("INT64S").ValidInt64s(",") +vals = cfg.Section("").Key("UINTS").ValidUints(",") +vals = cfg.Section("").Key("UINT64S").ValidUint64s(",") +vals = cfg.Section("").Key("TIMES").ValidTimes(",") +``` + +Or to return nothing but error when have invalid inputs: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> error +vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",") +vals = cfg.Section("").Key("INTS").StrictInts(",") +vals = cfg.Section("").Key("INT64S").StrictInt64s(",") +vals = cfg.Section("").Key("UINTS").StrictUints(",") +vals = cfg.Section("").Key("UINT64S").StrictUint64s(",") +vals = cfg.Section("").Key("TIMES").StrictTimes(",") +``` + +### Save your configuration + +Finally, it's time to save 
your configuration to somewhere. + +A typical way to save configuration is writing it to a file: + +```go +// ... +err = cfg.SaveTo("my.ini") +err = cfg.SaveToIndent("my.ini", "\t") +``` + +Another way to save is writing to a `io.Writer` interface: + +```go +// ... +cfg.WriteTo(writer) +cfg.WriteToIndent(writer, "\t") +``` + +By default, spaces are used to align "=" sign between key and values, to disable that: + +```go +ini.PrettyFormat = false +``` + +## Advanced Usage + +### Recursive Values + +For all value of keys, there is a special syntax `%()s`, where `` is the key name in same section or default section, and `%()s` will be replaced by corresponding value(empty string if key not found). You can use this syntax at most 99 level of recursions. + +```ini +NAME = ini + +[author] +NAME = Unknwon +GITHUB = https://github.com/%(NAME)s + +[package] +FULL_NAME = github.com/go-ini/%(NAME)s +``` + +```go +cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon +cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini +``` + +### Parent-child Sections + +You can use `.` in section name to indicate parent-child relationship between two or more sections. If the key not found in the child section, library will try again on its parent section until there is no parent section. + +```ini +NAME = ini +VERSION = v1 +IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s + +[package] +CLONE_URL = https://%(IMPORT_PATH)s + +[package.sub] +``` + +```go +cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 +``` + +#### Retrieve parent keys available to a child section + +```go +cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"] +``` + +### Auto-increment Key Names + +If key name is `-` in data source, then it would be seen as special syntax for auto-increment key name start from 1, and every section is independent on counter. + +```ini +[features] +-: Support read/write comments of keys and sections +-: Support auto-increment of key names +-: Support load multiple files to overwrite key values +``` + +```go +cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"} +``` + +### Map To Struct + +Want more objective way to play with INI? Cool. + +```ini +Name = Unknwon +age = 21 +Male = true +Born = 1993-01-01T20:17:05Z + +[Note] +Content = Hi is a good man! +Cities = HangZhou, Boston +``` + +```go +type Note struct { + Content string + Cities []string +} + +type Person struct { + Name string + Age int `ini:"age"` + Male bool + Born time.Time + Note + Created time.Time `ini:"-"` +} + +func main() { + cfg, err := ini.Load("path/to/ini") + // ... + p := new(Person) + err = cfg.MapTo(p) + // ... + + // Things can be simpler. + err = ini.MapTo(p, "path/to/ini") + // ... + + // Just map a section? Fine. + n := new(Note) + err = cfg.Section("Note").MapTo(n) + // ... +} +``` + +Can I have default value for field? Absolutely. + +Assign it before you map to struct. It will keep the value as it is if the key is not presented or got wrong type. + +```go +// ... +p := &Person{ + Name: "Joe", +} +// ... +``` + +It's really cool, but what's the point if you can't give me my file back from struct? + +### Reflect From Struct + +Why not? 

```go
type Embeded struct {
	Dates  []time.Time `delim:"|"`
	Places []string    `ini:"places,omitempty"`
	None   []int       `ini:",omitempty"`
}

type Author struct {
	Name      string `ini:"NAME"`
	Male      bool
	Age       int
	GPA       float64
	NeverMind string `ini:"-"`
	*Embeded
}

func main() {
	a := &Author{"Unknwon", true, 21, 2.8, "",
		&Embeded{
			[]time.Time{time.Now(), time.Now()},
			[]string{"HangZhou", "Boston"},
			[]int{},
		}}
	cfg := ini.Empty()
	err := ini.ReflectFrom(cfg, a)
	// ...
}
```

So, what do I get?

```ini
NAME = Unknwon
Male = true
Age = 21
GPA = 2.8

[Embeded]
Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
places = HangZhou,Boston
```

#### Name Mapper

To save you time and keep your code cleaner, this library supports a [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) that maps struct field names to actual section and key names.

There are two built-in name mappers:

- `AllCapsUnderscore`: converts the name to the `ALL_CAPS_UNDERSCORE` format, then matches the section or key.
- `TitleUnderscore`: converts the name to the `title_underscore` format, then matches the section or key.

To use them:

```go
type Info struct {
	PackageName string
}

func main() {
	err := ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini"))
	// ...

	cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
	// ...
	info := new(Info)
	cfg.NameMapper = ini.AllCapsUnderscore
	err = cfg.MapTo(info)
	// ...
}
```

The same name mapper rules apply to the `ini.ReflectFromWithMapper` function.

#### Value Mapper

To expand values (e.g. from environment variables), you can use the `ValueMapper` to transform values:

```go
type Env struct {
	Foo string `ini:"foo"`
}

func main() {
	cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n"))
	cfg.ValueMapper = os.ExpandEnv
	// ...
	env := &Env{}
	err = cfg.Section("env").MapTo(env)
}
```

This would set the value of `env.Foo` to the value of the environment variable `MY_VAR`.

#### Other Notes On Map/Reflect

Any embedded struct is treated as its own section by default, and there are no automatic parent-child relations in the map/reflect feature:

```go
type Child struct {
	Age string
}

type Parent struct {
	Name string
	Child
}

type Config struct {
	City string
	Parent
}
```

Example configuration:

```ini
City = Boston

[Parent]
Name = Unknwon

[Child]
Age = 21
```

What if (yes, I'm paranoid) I want the embedded struct to live in the same section? Well, all roads lead to Rome.

```go
type Child struct {
	Age string
}

type Parent struct {
	Name  string
	Child `ini:"Parent"`
}

type Config struct {
	City string
	Parent
}
```

Example configuration:

```ini
City = Boston

[Parent]
Name = Unknwon
Age = 21
```

## Getting Help

- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
- [File An Issue](https://github.com/go-ini/ini/issues/new)

## FAQs

### What does the `BlockMode` field do?

By default, the library lets you both read and write values, so it needs a lock to keep your data safe. But in cases where you are certain you only read data through the library, you can set `cfg.BlockMode = false` to make read operations about **50-70%** faster.

### Why another INI library?

Many people are using my other INI library, [goconfig](https://github.com/Unknwon/goconfig), so the reason for this one is that I wanted to write more Go-style code. Also, when you set `cfg.BlockMode = false`, this one is about **10-30%** faster.
+ +To make those changes I have to confirm API broken, so it's safer to keep it in another place and start using `gopkg.in` to version my package at this time.(PS: shorter import path) + +## License + +This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text. diff --git a/vendor/github.com/go-ini/ini/README_ZH.md b/vendor/github.com/go-ini/ini/README_ZH.md new file mode 100644 index 00000000..4595d310 --- /dev/null +++ b/vendor/github.com/go-ini/ini/README_ZH.md @@ -0,0 +1,696 @@ +本包提供了 Go 语言中读写 INI 文件的功能。 + +## 功能特性 + +- 支持覆盖加载多个数据源(`[]byte` 或文件) +- 支持递归读取键值 +- 支持读取父子分区 +- 支持读取自增键名 +- 支持读取多行的键值 +- 支持大量辅助方法 +- 支持在读取时直接转换为 Go 语言类型 +- 支持读取和 **写入** 分区和键的注释 +- 轻松操作分区、键值和注释 +- 在保存文件时分区和键值会保持原有的顺序 + +## 下载安装 + +使用一个特定版本: + + go get gopkg.in/ini.v1 + +使用最新版: + + go get github.com/go-ini/ini + +如需更新请添加 `-u` 选项。 + +### 测试安装 + +如果您想要在自己的机器上运行测试,请使用 `-t` 标记: + + go get -t gopkg.in/ini.v1 + +如需更新请添加 `-u` 选项。 + +## 开始使用 + +### 从数据源加载 + +一个 **数据源** 可以是 `[]byte` 类型的原始数据,或 `string` 类型的文件路径。您可以加载 **任意多个** 数据源。如果您传递其它类型的数据源,则会直接返回错误。 + +```go +cfg, err := ini.Load([]byte("raw data"), "filename") +``` + +或者从一个空白的文件开始: + +```go +cfg := ini.Empty() +``` + +当您在一开始无法决定需要加载哪些数据源时,仍可以使用 **Append()** 在需要的时候加载它们。 + +```go +err := cfg.Append("other file", []byte("other raw data")) +``` + +当您想要加载一系列文件,但是不能够确定其中哪些文件是不存在的,可以通过调用函数 `LooseLoad` 来忽略它们(`Load` 会因为文件不存在而返回错误): + +```go +cfg, err := ini.LooseLoad("filename", "filename_404") +``` + +更牛逼的是,当那些之前不存在的文件在重新调用 `Reload` 方法的时候突然出现了,那么它们会被正常加载。 + +#### 忽略键名的大小写 + +有时候分区和键的名称大小写混合非常烦人,这个时候就可以通过 `InsensitiveLoad` 将所有分区和键名在读取里强制转换为小写: + +```go +cfg, err := ini.InsensitiveLoad("filename") +//... + +// sec1 和 sec2 指向同一个分区对象 +sec1, err := cfg.GetSection("Section") +sec2, err := cfg.GetSection("SecTIOn") + +// key1 和 key2 指向同一个键对象 +key1, err := cfg.GetKey("Key") +key2, err := cfg.GetKey("KeY") +``` + +#### 类似 MySQL 配置中的布尔值键 + +MySQL 的配置文件中会出现没有具体值的布尔类型的键: + +```ini +[mysqld] +... 
+skip-host-cache +skip-name-resolve +``` + +默认情况下这被认为是缺失值而无法完成解析,但可以通过高级的加载选项对它们进行处理: + +```go +cfg, err := LoadSources(LoadOptions{AllowBooleanKeys: true}, "my.cnf")) +``` + +这些键的值永远为 `true`,且在保存到文件时也只会输出键名。 + +### 操作分区(Section) + +获取指定分区: + +```go +section, err := cfg.GetSection("section name") +``` + +如果您想要获取默认分区,则可以用空字符串代替分区名: + +```go +section, err := cfg.GetSection("") +``` + +当您非常确定某个分区是存在的,可以使用以下简便方法: + +```go +section := cfg.Section("") +``` + +如果不小心判断错了,要获取的分区其实是不存在的,那会发生什么呢?没事的,它会自动创建并返回一个对应的分区对象给您。 + +创建一个分区: + +```go +err := cfg.NewSection("new section") +``` + +获取所有分区对象或名称: + +```go +sections := cfg.Sections() +names := cfg.SectionStrings() +``` + +### 操作键(Key) + +获取某个分区下的键: + +```go +key, err := cfg.Section("").GetKey("key name") +``` + +和分区一样,您也可以直接获取键而忽略错误处理: + +```go +key := cfg.Section("").Key("key name") +``` + +判断某个键是否存在: + +```go +yes := cfg.Section("").HasKey("key name") +``` + +创建一个新的键: + +```go +err := cfg.Section("").NewKey("name", "value") +``` + +获取分区下的所有键或键名: + +```go +keys := cfg.Section("").Keys() +names := cfg.Section("").KeyStrings() +``` + +获取分区下的所有键值对的克隆: + +```go +hash := cfg.Section("").KeysHash() +``` + +### 操作键值(Value) + +获取一个类型为字符串(string)的值: + +```go +val := cfg.Section("").Key("key name").String() +``` + +获取值的同时通过自定义函数进行处理验证: + +```go +val := cfg.Section("").Key("key name").Validate(func(in string) string { + if len(in) == 0 { + return "default" + } + return in +}) +``` + +如果您不需要任何对值的自动转变功能(例如递归读取),可以直接获取原值(这种方式性能最佳): + +```go +val := cfg.Section("").Key("key name").Value() +``` + +判断某个原值是否存在: + +```go +yes := cfg.Section("").HasValue("test value") +``` + +获取其它类型的值: + +```go +// 布尔值的规则: +// true 当值为:1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On +// false 当值为:0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off +v, err = cfg.Section("").Key("BOOL").Bool() +v, err = cfg.Section("").Key("FLOAT64").Float64() +v, err = cfg.Section("").Key("INT").Int() +v, err = cfg.Section("").Key("INT64").Int64() +v, err = cfg.Section("").Key("UINT").Uint() +v, err = cfg.Section("").Key("UINT64").Uint64() +v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339) +v, err = cfg.Section("").Key("TIME").Time() // RFC3339 + +v = cfg.Section("").Key("BOOL").MustBool() +v = cfg.Section("").Key("FLOAT64").MustFloat64() +v = cfg.Section("").Key("INT").MustInt() +v = cfg.Section("").Key("INT64").MustInt64() +v = cfg.Section("").Key("UINT").MustUint() +v = cfg.Section("").Key("UINT64").MustUint64() +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339) +v = cfg.Section("").Key("TIME").MustTime() // RFC3339 + +// 由 Must 开头的方法名允许接收一个相同类型的参数来作为默认值, +// 当键不存在或者转换失败时,则会直接返回该默认值。 +// 但是,MustString 方法必须传递一个默认值。 + +v = cfg.Seciont("").Key("String").MustString("default") +v = cfg.Section("").Key("BOOL").MustBool(true) +v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25) +v = cfg.Section("").Key("INT").MustInt(10) +v = cfg.Section("").Key("INT64").MustInt64(99) +v = cfg.Section("").Key("UINT").MustUint(3) +v = cfg.Section("").Key("UINT64").MustUint64(6) +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now()) +v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339 +``` + +如果我的值有好多行怎么办? + +```ini +[advance] +ADDRESS = """404 road, +NotFound, State, 5000 +Earth""" +``` + +嗯哼?小 case! + +```go +cfg.Section("advance").Key("ADDRESS").String() + +/* --- start --- +404 road, +NotFound, State, 5000 +Earth +------ end --- */ +``` + +赞爆了!那要是我属于一行的内容写不下想要写到第二行怎么办? + +```ini +[advance] +two_lines = how about \ + continuation lines? 
+lots_of_lines = 1 \ + 2 \ + 3 \ + 4 +``` + +简直是小菜一碟! + +```go +cfg.Section("advance").Key("two_lines").String() // how about continuation lines? +cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 +``` + +可是我有时候觉得两行连在一起特别没劲,怎么才能不自动连接两行呢? + +```go +cfg, err := ini.LoadSources(ini.LoadOptions{ + IgnoreContinuation: true, +}, "filename") +``` + +哇靠给力啊! + +需要注意的是,值两侧的单引号会被自动剔除: + +```ini +foo = "some value" // foo: some value +bar = 'some value' // bar: some value +``` + +这就是全部了?哈哈,当然不是。 + +#### 操作键值的辅助方法 + +获取键值时设定候选值: + +```go +v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"}) +v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75}) +v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30}) +v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30}) +v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9}) +v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9}) +v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3}) +v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339 +``` + +如果获取到的值不是候选值的任意一个,则会返回默认值,而默认值不需要是候选值中的一员。 + +验证获取的值是否在指定范围内: + +```go +vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2) +vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20) +vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20) +vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9) +vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9) +vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime) +vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 +``` + +##### 自动分割键值到切片(slice) + +当存在无效输入时,使用零值代替: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0] +vals = cfg.Section("").Key("STRINGS").Strings(",") +vals = cfg.Section("").Key("FLOAT64S").Float64s(",") +vals = cfg.Section("").Key("INTS").Ints(",") +vals = cfg.Section("").Key("INT64S").Int64s(",") +vals = cfg.Section("").Key("UINTS").Uints(",") +vals = cfg.Section("").Key("UINT64S").Uint64s(",") +vals = cfg.Section("").Key("TIMES").Times(",") +``` + +从结果切片中剔除无效输入: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [2.2] +vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",") +vals = cfg.Section("").Key("INTS").ValidInts(",") +vals = cfg.Section("").Key("INT64S").ValidInt64s(",") +vals = cfg.Section("").Key("UINTS").ValidUints(",") +vals = cfg.Section("").Key("UINT64S").ValidUint64s(",") +vals = cfg.Section("").Key("TIMES").ValidTimes(",") +``` + +当存在无效输入时,直接返回错误: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> error +vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",") +vals = cfg.Section("").Key("INTS").StrictInts(",") +vals = cfg.Section("").Key("INT64S").StrictInt64s(",") +vals = cfg.Section("").Key("UINTS").StrictUints(",") +vals = cfg.Section("").Key("UINT64S").StrictUint64s(",") +vals = cfg.Section("").Key("TIMES").StrictTimes(",") +``` + +### 保存配置 + +终于到了这个时刻,是时候保存一下配置了。 + +比较原始的做法是输出配置到某个文件: + +```go +// ... +err = cfg.SaveTo("my.ini") +err = cfg.SaveToIndent("my.ini", "\t") +``` + +另一个比较高级的做法是写入到任何实现 `io.Writer` 接口的对象中: + +```go +// ... 
+cfg.WriteTo(writer) +cfg.WriteToIndent(writer, "\t") +``` + +默认情况下,空格将被用于对齐键值之间的等号以美化输出结果,以下代码可以禁用该功能: + +```go +ini.PrettyFormat = false +``` + +### 高级用法 + +#### 递归读取键值 + +在获取所有键值的过程中,特殊语法 `%()s` 会被应用,其中 `` 可以是相同分区或者默认分区下的键名。字符串 `%()s` 会被相应的键值所替代,如果指定的键不存在,则会用空字符串替代。您可以最多使用 99 层的递归嵌套。 + +```ini +NAME = ini + +[author] +NAME = Unknwon +GITHUB = https://github.com/%(NAME)s + +[package] +FULL_NAME = github.com/go-ini/%(NAME)s +``` + +```go +cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon +cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini +``` + +#### 读取父子分区 + +您可以在分区名称中使用 `.` 来表示两个或多个分区之间的父子关系。如果某个键在子分区中不存在,则会去它的父分区中再次寻找,直到没有父分区为止。 + +```ini +NAME = ini +VERSION = v1 +IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s + +[package] +CLONE_URL = https://%(IMPORT_PATH)s + +[package.sub] +``` + +```go +cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 +``` + +#### 获取上级父分区下的所有键名 + +```go +cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"] +``` + +#### 读取自增键名 + +如果数据源中的键名为 `-`,则认为该键使用了自增键名的特殊语法。计数器从 1 开始,并且分区之间是相互独立的。 + +```ini +[features] +-: Support read/write comments of keys and sections +-: Support auto-increment of key names +-: Support load multiple files to overwrite key values +``` + +```go +cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"} +``` + +### 映射到结构 + +想要使用更加面向对象的方式玩转 INI 吗?好主意。 + +```ini +Name = Unknwon +age = 21 +Male = true +Born = 1993-01-01T20:17:05Z + +[Note] +Content = Hi is a good man! +Cities = HangZhou, Boston +``` + +```go +type Note struct { + Content string + Cities []string +} + +type Person struct { + Name string + Age int `ini:"age"` + Male bool + Born time.Time + Note + Created time.Time `ini:"-"` +} + +func main() { + cfg, err := ini.Load("path/to/ini") + // ... + p := new(Person) + err = cfg.MapTo(p) + // ... + + // 一切竟可以如此的简单。 + err = ini.MapTo(p, "path/to/ini") + // ... + + // 嗯哼?只需要映射一个分区吗? + n := new(Note) + err = cfg.Section("Note").MapTo(n) + // ... +} +``` + +结构的字段怎么设置默认值呢?很简单,只要在映射之前对指定字段进行赋值就可以了。如果键未找到或者类型错误,该值不会发生改变。 + +```go +// ... +p := &Person{ + Name: "Joe", +} +// ... +``` + +这样玩 INI 真的好酷啊!然而,如果不能还给我原来的配置文件,有什么卵用? + +### 从结构反射 + +可是,我有说不能吗? + +```go +type Embeded struct { + Dates []time.Time `delim:"|"` + Places []string `ini:"places,omitempty"` + None []int `ini:",omitempty"` +} + +type Author struct { + Name string `ini:"NAME"` + Male bool + Age int + GPA float64 + NeverMind string `ini:"-"` + *Embeded +} + +func main() { + a := &Author{"Unknwon", true, 21, 2.8, "", + &Embeded{ + []time.Time{time.Now(), time.Now()}, + []string{"HangZhou", "Boston"}, + []int{}, + }} + cfg := ini.Empty() + err = ini.ReflectFrom(cfg, a) + // ... +} +``` + +瞧瞧,奇迹发生了。 + +```ini +NAME = Unknwon +Male = true +Age = 21 +GPA = 2.8 + +[Embeded] +Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00 +places = HangZhou,Boston +``` + +#### 名称映射器(Name Mapper) + +为了节省您的时间并简化代码,本库支持类型为 [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) 的名称映射器,该映射器负责结构字段名与分区名和键名之间的映射。 + +目前有 2 款内置的映射器: + +- `AllCapsUnderscore`:该映射器将字段名转换至格式 `ALL_CAPS_UNDERSCORE` 后再去匹配分区名和键名。 +- `TitleUnderscore`:该映射器将字段名转换至格式 `title_underscore` 后再去匹配分区名和键名。 + +使用方法: + +```go +type Info struct{ + PackageName string +} + +func main() { + err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini")) + // ... + + cfg, err := ini.Load([]byte("PACKAGE_NAME=ini")) + // ... + info := new(Info) + cfg.NameMapper = ini.AllCapsUnderscore + err = cfg.MapTo(info) + // ... 
+} +``` + +使用函数 `ini.ReflectFromWithMapper` 时也可应用相同的规则。 + +#### 值映射器(Value Mapper) + +值映射器允许使用一个自定义函数自动展开值的具体内容,例如:运行时获取环境变量: + +```go +type Env struct { + Foo string `ini:"foo"` +} + +func main() { + cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n") + cfg.ValueMapper = os.ExpandEnv + // ... + env := &Env{} + err = cfg.Section("env").MapTo(env) +} +``` + +本例中,`env.Foo` 将会是运行时所获取到环境变量 `MY_VAR` 的值。 + +#### 映射/反射的其它说明 + +任何嵌入的结构都会被默认认作一个不同的分区,并且不会自动产生所谓的父子分区关联: + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child +} + +type Config struct { + City string + Parent +} +``` + +示例配置文件: + +```ini +City = Boston + +[Parent] +Name = Unknwon + +[Child] +Age = 21 +``` + +很好,但是,我就是要嵌入结构也在同一个分区。好吧,你爹是李刚! + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child `ini:"Parent"` +} + +type Config struct { + City string + Parent +} +``` + +示例配置文件: + +```ini +City = Boston + +[Parent] +Name = Unknwon +Age = 21 +``` + +## 获取帮助 + +- [API 文档](https://gowalker.org/gopkg.in/ini.v1) +- [创建工单](https://github.com/go-ini/ini/issues/new) + +## 常见问题 + +### 字段 `BlockMode` 是什么? + +默认情况下,本库会在您进行读写操作时采用锁机制来确保数据时间。但在某些情况下,您非常确定只进行读操作。此时,您可以通过设置 `cfg.BlockMode = false` 来将读操作提升大约 **50-70%** 的性能。 + +### 为什么要写另一个 INI 解析库? + +许多人都在使用我的 [goconfig](https://github.com/Unknwon/goconfig) 来完成对 INI 文件的操作,但我希望使用更加 Go 风格的代码。并且当您设置 `cfg.BlockMode = false` 时,会有大约 **10-30%** 的性能提升。 + +为了做出这些改变,我必须对 API 进行破坏,所以新开一个仓库是最安全的做法。除此之外,本库直接使用 `gopkg.in` 来进行版本化发布。(其实真相是导入路径更短了) diff --git a/vendor/github.com/golang/protobuf/README.md b/vendor/github.com/golang/protobuf/README.md new file mode 100644 index 00000000..8fdc89b4 --- /dev/null +++ b/vendor/github.com/golang/protobuf/README.md @@ -0,0 +1,199 @@ +# Go support for Protocol Buffers + +Google's data interchange format. +Copyright 2010 The Go Authors. +https://github.com/golang/protobuf + +This package and the code it generates requires at least Go 1.4. + +This software implements Go bindings for protocol buffers. For +information about protocol buffers themselves, see + https://developers.google.com/protocol-buffers/ + +## Installation ## + +To use this software, you must: +- Install the standard C++ implementation of protocol buffers from + https://developers.google.com/protocol-buffers/ +- Of course, install the Go compiler and tools from + https://golang.org/ + See + https://golang.org/doc/install + for details or, if you are using gccgo, follow the instructions at + https://golang.org/doc/install/gccgo +- Grab the code from the repository and install the proto package. + The simplest way is to run `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}`. + The compiler plugin, protoc-gen-go, will be installed in $GOBIN, + defaulting to $GOPATH/bin. It must be in your $PATH for the protocol + compiler, protoc, to find it. + +This software has two parts: a 'protocol compiler plugin' that +generates Go source files that, once compiled, can access and manage +protocol buffers; and a library that implements run-time support for +encoding (marshaling), decoding (unmarshaling), and accessing protocol +buffers. + +There is support for gRPC in Go using protocol buffers. +See the note at the bottom of this file for details. + +There are no insertion points in the plugin. + + +## Using protocol buffers with Go ## + +Once the software is installed, there are two steps to using it. +First you must compile the protocol buffer definitions and then import +them, with the support library, into your program. 

To compile the protocol buffer definition, run protoc with the --go_out
parameter set to the directory you want to output the Go code to.

	protoc --go_out=. *.proto

The generated files will be suffixed .pb.go. See the Test code below
for an example using such a file.

The package comment for the proto library contains text describing
the interface provided in Go for protocol buffers. Here is an edited
version.

==========

The proto package converts data structures to and from the
wire format of protocol buffers. It works in concert with the
Go source code generated for .proto files by the protocol compiler.

A summary of the properties of the protocol buffer interface
for a protocol buffer variable v:

  - Names are turned from camel_case to CamelCase for export.
  - There are no methods on v to set fields; just treat
    them as structure fields.
  - There are getters that return a field's value if set,
    and return the field's default value if unset.
    The getters work even if the receiver is a nil message.
  - The zero value for a struct is its correct initialization state.
    All desired fields must be set before marshaling.
  - A Reset() method will restore a protobuf struct to its zero state.
  - Non-repeated fields are pointers to the values; nil means unset.
    That is, optional or required field int32 f becomes F *int32.
  - Repeated fields are slices.
  - Helper functions are available to aid the setting of fields.
    Helpers for getting values are superseded by the
    GetFoo methods and their use is deprecated.
        msg.Foo = proto.String("hello") // set field
  - Constants are defined to hold the default values of all fields that
    have them. They have the form Default_StructName_FieldName.
    Because the getter methods handle defaulted values,
    direct use of these constants should be rare.
  - Enums are given type names and maps from names to values.
    Enum values are prefixed with the enum's type name. Enum types have
    a String method, and an Enum method to assist in message construction.
  - Nested groups and enums have type names prefixed with the name of
    the surrounding message type.
  - Extensions are given descriptor names that start with E_,
    followed by an underscore-delimited list of the nested messages
    that contain it (if any) followed by the CamelCased name of the
    extension field itself. HasExtension, ClearExtension, GetExtension
    and SetExtension are functions for manipulating extensions.
  - Oneof field sets are given a single field in their message,
    with distinguished wrapper types for each possible field value.
  - Marshal and Unmarshal are functions to encode and decode the wire format.

When the .proto file specifies `syntax="proto3"`, there are some differences:

  - Non-repeated fields of non-message type are values instead of pointers.
  - Getters are only generated for message and oneof fields.
  - Enum types do not get an Enum method.
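
To make the first of those differences concrete, consider a proto3 message like the following (a hedged sketch; the exact generated output depends on the protoc-gen-go version):

```proto
syntax = "proto3";
package example;

message Ping {
  string label = 1;
}
```

The generated struct holds the field as a plain value rather than a pointer, roughly along these lines (a sketch of the shape only, not verbatim generated code):

```go
// Under proto3, a non-repeated scalar field becomes a value, not *string.
type Ping struct {
	Label string `protobuf:"bytes,1,opt,name=label" json:"label,omitempty"`
}
```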
+ +Consider file test.proto, containing + +```proto + package example; + + enum FOO { X = 17; }; + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + } +``` + +To create and play with a Test object from the example package, + +```go + package main + + import ( + "log" + + "github.com/golang/protobuf/proto" + "path/to/example" + ) + + func main() { + test := &example.Test { + Label: proto.String("hello"), + Type: proto.Int32(17), + Reps: []int64{1, 2, 3}, + Optionalgroup: &example.Test_OptionalGroup { + RequiredField: proto.String("good bye"), + }, + } + data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := &example.Test{} + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest contain the same data. + if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // etc. + } +``` + +## Parameters ## + +To pass extra parameters to the plugin, use a comma-separated +parameter list separated from the output directory by a colon: + + + protoc --go_out=plugins=grpc,import_path=mypackage:. *.proto + + +- `import_prefix=xxx` - a prefix that is added onto the beginning of + all imports. Useful for things like generating protos in a + subdirectory, or regenerating vendored protobufs in-place. +- `import_path=foo/bar` - used as the package if no input files + declare `go_package`. If it contains slashes, everything up to the + rightmost slash is ignored. +- `plugins=plugin1+plugin2` - specifies the list of sub-plugins to + load. The only plugin in this repo is `grpc`. +- `Mfoo/bar.proto=quux/shme` - declares that foo/bar.proto is + associated with Go package quux/shme. This is subject to the + import_prefix parameter. + +## gRPC Support ## + +If a proto file specifies RPC services, protoc-gen-go can be instructed to +generate code compatible with gRPC (http://www.grpc.io/). To do this, pass +the `plugins` parameter to protoc-gen-go; the usual way is to insert it into +the --go_out argument to protoc: + + protoc --go_out=plugins=grpc:. *.proto + +## Plugins ## + +The `protoc-gen-go/generator` package exposes a plugin interface, +which is used by the gRPC code generation. This interface is not +supported and is subject to incompatible changes without notice. diff --git a/vendor/github.com/gorilla/context/README.md b/vendor/github.com/gorilla/context/README.md new file mode 100644 index 00000000..c60a31b0 --- /dev/null +++ b/vendor/github.com/gorilla/context/README.md @@ -0,0 +1,7 @@ +context +======= +[![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context) + +gorilla/context is a general purpose registry for global request variables. 
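
A minimal usage sketch, relying on the package's `Set`, `Get` and `Clear` functions (remember to call `Clear` at the end of the request so per-request values do not leak):

```go
package main

import (
	"net/http"

	"github.com/gorilla/context"
)

func handler(w http.ResponseWriter, r *http.Request) {
	// Make sure request-scoped values are released when we are done.
	defer context.Clear(r)

	// Stash a value keyed to this request, then read it back.
	context.Set(r, "user", "alice")
	user := context.Get(r, "user").(string)

	w.Write([]byte(user))
}
```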

Read the full documentation here: http://www.gorillatoolkit.org/pkg/context
diff --git a/vendor/github.com/gorilla/handlers/README.md b/vendor/github.com/gorilla/handlers/README.md
new file mode 100644
index 00000000..a340abe0
--- /dev/null
+++ b/vendor/github.com/gorilla/handlers/README.md
@@ -0,0 +1,52 @@
gorilla/handlers
================
[![GoDoc](https://godoc.org/github.com/gorilla/handlers?status.svg)](https://godoc.org/github.com/gorilla/handlers) [![Build Status](https://travis-ci.org/gorilla/handlers.svg?branch=master)](https://travis-ci.org/gorilla/handlers)

Package handlers is a collection of handlers (aka "HTTP middleware") for use
with Go's `net/http` package (or any framework supporting `http.Handler`), including:

* `LoggingHandler` for logging HTTP requests in the Apache [Common Log
  Format](http://httpd.apache.org/docs/2.2/logs.html#common).
* `CombinedLoggingHandler` for logging HTTP requests in the Apache [Combined Log
  Format](http://httpd.apache.org/docs/2.2/logs.html#combined) commonly used by
  both Apache and nginx.
* `CompressHandler` for gzipping responses.
* `ContentTypeHandler` for validating requests against a list of accepted
  content types.
* `MethodHandler` for matching HTTP methods against handlers in a
  `map[string]http.Handler`.
* `ProxyHeaders` for populating `r.RemoteAddr` and `r.URL.Scheme` based on the
  `X-Forwarded-For`, `X-Real-IP`, `X-Forwarded-Proto` and RFC7239 `Forwarded`
  headers when running a Go server behind an HTTP reverse proxy.
* `CanonicalHost` for redirecting to the preferred host when handling multiple
  domains (i.e. multiple CNAME aliases).

Other handlers are documented [on the Gorilla
website](http://www.gorillatoolkit.org/pkg/handlers).

## Example

A simple example using `handlers.LoggingHandler` and `handlers.CompressHandler`:

```go
import (
	"net/http"
	"os"

	"github.com/gorilla/handlers"
)

func main() {
	r := http.NewServeMux()

	// Only log requests to our admin dashboard to stdout
	r.Handle("/admin", handlers.LoggingHandler(os.Stdout, http.HandlerFunc(ShowAdminDashboard)))
	r.HandleFunc("/", ShowIndex)

	// Wrap our server with our gzip handler to gzip compress all responses.
	http.ListenAndServe(":8000", handlers.CompressHandler(r))
}
```

## License

BSD licensed. See the included LICENSE file for details.

diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md
new file mode 100644
index 00000000..e60301b0
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/README.md
@@ -0,0 +1,7 @@
mux
===
[![Build Status](https://travis-ci.org/gorilla/mux.png?branch=master)](https://travis-ci.org/gorilla/mux)

gorilla/mux is a powerful URL router and dispatcher.

Read the full documentation here: http://www.gorillatoolkit.org/pkg/mux
diff --git a/vendor/github.com/inconshreveable/mousetrap/README.md b/vendor/github.com/inconshreveable/mousetrap/README.md
new file mode 100644
index 00000000..7a950d17
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/README.md
@@ -0,0 +1,23 @@
# mousetrap

mousetrap is a tiny library that answers a single question.

On a Windows machine, was the process invoked by someone double-clicking on
the executable file while browsing in Explorer?

### Motivation

Windows developers unfamiliar with command line tools will often "double-click"
the executable for a tool. Because most CLI tools print the help and then exit
when invoked without arguments, this is often very frustrating for those users.

mousetrap provides a way to detect these invocations so that you can provide
more helpful behavior and instructions on how to run the CLI tool. To see what
this looks like, both from an organizational and a technical perspective, see
https://inconshreveable.com/09-09-2014/sweat-the-small-stuff/

### The interface

The library exposes a single interface:

    func StartedByExplorer() (bool)
diff --git a/vendor/github.com/jmespath/go-jmespath/README.md b/vendor/github.com/jmespath/go-jmespath/README.md
new file mode 100644
index 00000000..187ef676
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/README.md
@@ -0,0 +1,7 @@
# go-jmespath - A JMESPath implementation in Go

[![Build Status](https://img.shields.io/travis/jmespath/go-jmespath.svg)](https://travis-ci.org/jmespath/go-jmespath)

See http://jmespath.org for more info.
diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md
new file mode 100644
index 00000000..0e3356cb
--- /dev/null
+++ b/vendor/github.com/miekg/dns/README.md
@@ -0,0 +1,154 @@
[![Build Status](https://travis-ci.org/miekg/dns.svg?branch=master)](https://travis-ci.org/miekg/dns) [![](https://godoc.org/github.com/miekg/dns?status.svg)](https://godoc.org/github.com/miekg/dns)

# Alternative (more granular) approach to a DNS library

> Less is more.

Complete and usable DNS library. All widely used Resource Records are
supported, including the DNSSEC types. It follows a lean and mean philosophy:
if there is stuff you should know as a DNS programmer, there isn't a convenience
function for it. Server-side and client-side programming are supported, i.e. you
can build servers and resolvers with it.

We try to keep the "master" branch as sane as possible and at the bleeding edge
of standards, avoiding breaking changes wherever reasonable. We support the last
two versions of Go, currently: 1.5 and 1.6.

# Goals

* KISS;
* Fast;
* Small API. If it's easy to code in Go, don't make a function for it.

# Users

A not-so-up-to-date-list-that-may-be-actually-current:

* https://cloudflare.com
* https://github.com/abh/geodns
* http://www.statdns.com/
* http://www.dnsinspect.com/
* https://github.com/chuangbo/jianbing-dictionary-dns
* http://www.dns-lg.com/
* https://github.com/fcambus/rrda
* https://github.com/kenshinx/godns
* https://github.com/skynetservices/skydns
* https://github.com/hashicorp/consul
* https://github.com/DevelopersPL/godnsagent
* https://github.com/duedil-ltd/discodns
* https://github.com/StalkR/dns-reverse-proxy
* https://github.com/tianon/rawdns
* https://mesosphere.github.io/mesos-dns/
* https://pulse.turbobytes.com/
* https://play.google.com/store/apps/details?id=com.turbobytes.dig
* https://github.com/fcambus/statzone
* https://github.com/benschw/dns-clb-go
* https://github.com/corny/dnscheck for http://public-dns.info/
* https://namesmith.io
* https://github.com/miekg/unbound
* https://github.com/miekg/exdns
* https://dnslookup.org
* https://github.com/looterz/grimd
* https://github.com/phamhongviet/serf-dns
* https://github.com/mehrdadrad/mylg
* https://github.com/bamarni/dockness
* https://github.com/fffaraz/microdns

Send a pull request if you want to be listed here.
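
Before the feature rundown, here is a minimal client-side query to give a feel for the API (a sketch: the name being looked up and the resolver address are placeholders, and error handling is kept minimal):

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	// Build a query for the A records of a name; names must be fully qualified.
	m := new(dns.Msg)
	m.SetQuestion(dns.Fqdn("miek.nl"), dns.TypeA)

	// Exchange the message with a resolver; 8.8.8.8:53 is just an example.
	c := new(dns.Client)
	in, _, err := c.Exchange(m, "8.8.8.8:53")
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, rr := range in.Answer {
		fmt.Println(rr)
	}
}
```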

# Features

* UDP/TCP queries, IPv4 and IPv6;
* RFC 1035 zone file parsing ($INCLUDE, $ORIGIN, $TTL and $GENERATE (for all record types) are supported);
* Fast:
  * Reply speed around ~ 80K qps (faster hardware results in more qps);
  * Parsing RRs ~ 100K RR/s, that's 5M records in about 50 seconds;
* Server side programming (mimicking the net/http package);
* Client side programming;
* DNSSEC: signing, validating and key generation for DSA, RSA and ECDSA;
* EDNS0, NSID, Cookies;
* AXFR/IXFR;
* TSIG, SIG(0);
* DNS over TLS: optional encrypted connection between client and server;
* DNS name compression;
* Depends only on the standard library.

Have fun!

Miek Gieben - 2010-2012 - <miek@miek.nl>

# Building

Building is done with the `go` tool. If you have set up your GOPATH correctly,
the following should work:

    go get github.com/miekg/dns
    go build github.com/miekg/dns

## Examples

A short "how to use the API" is at the beginning of doc.go (this will also show
when you call `godoc github.com/miekg/dns`).

Example programs can be found in the `github.com/miekg/exdns` repository.

## Supported RFCs

*all of them*

* 103{4,5} - DNS standard
* 1348 - NSAP record (removed the record)
* 1982 - Serial Arithmetic
* 1876 - LOC record
* 1995 - IXFR
* 1996 - DNS notify
* 2136 - DNS Update (dynamic updates)
* 2181 - RRset definition - there is no RRset type though, just []RR
* 2537 - RSAMD5 DNS keys
* 2065 - DNSSEC (updated in later RFCs)
* 2671 - EDNS record
* 2782 - SRV record
* 2845 - TSIG record
* 2915 - NAPTR record
* 2929 - DNS IANA Considerations
* 3110 - RSASHA1 DNS keys
* 3225 - DO bit (DNSSEC OK)
* 340{1,2,3} - NAPTR record
* 3445 - Limiting the scope of (DNS)KEY
* 3597 - Unknown RRs
* 403{3,4,5} - DNSSEC + validation functions
* 4255 - SSHFP record
* 4343 - Case insensitivity
* 4408 - SPF record
* 4509 - SHA256 Hash in DS
* 4592 - Wildcards in the DNS
* 4635 - HMAC SHA TSIG
* 4701 - DHCID
* 4892 - id.server
* 5001 - NSID
* 5155 - NSEC3 record
* 5205 - HIP record
* 5702 - SHA2 in the DNS
* 5936 - AXFR
* 5966 - TCP implementation recommendations
* 6605 - ECDSA
* 6725 - IANA Registry Update
* 6742 - ILNP DNS
* 6840 - Clarifications and Implementation Notes for DNS Security
* 6844 - CAA record
* 6891 - EDNS0 update
* 6895 - DNS IANA considerations
* 6975 - Algorithm Understanding in DNSSEC
* 7043 - EUI48/EUI64 records
* 7314 - DNS (EDNS) EXPIRE Option
* 7553 - URI record
* 7858 - DNS over TLS: Initiation and Performance Considerations (draft)
* 7873 - Domain Name System (DNS) Cookies (draft-ietf-dnsop-cookies)
* xxxx - EDNS0 DNS Update Lease (draft)

## Loosely based upon

* `ldns`
* `NSD`
* `Net::DNS`
* `GRONG`
diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md
new file mode 100644
index 00000000..659d6885
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/README.md
@@ -0,0 +1,46 @@
# mapstructure

mapstructure is a Go library for decoding generic map values to structures
and vice versa, while providing helpful error handling.

This library is most useful when decoding values from some data stream (JSON,
Gob, etc.) where you don't _quite_ know the structure of the underlying data
until you read a part of it. You can therefore read a `map[string]interface{}`
and use this library to decode it into the proper underlying native Go
structure.
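
A minimal sketch of that workflow (the struct and map here are invented for illustration):

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type Person struct {
	Name string
	Age  int
}

func main() {
	// Pretend this came from decoding JSON into a generic map first.
	input := map[string]interface{}{
		"Name": "Mitchell",
		"Age":  31,
	}

	// Decode the generic map into the typed struct.
	var p Person
	if err := mapstructure.Decode(input, &p); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("%+v\n", p) // {Name:Mitchell Age:31}
}
```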

## Installation

Standard `go get`:

```
$ go get github.com/mitchellh/mapstructure
```

## Usage & Example

For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure).

The `Decode` function has examples associated with it there.

## But Why?!

Go offers fantastic standard libraries for decoding formats such as JSON.
The standard method is to have a struct pre-created, and populate that struct
from the bytes of the encoded format. This is great, but the problem is if
you have configuration or an encoding that changes slightly depending on
specific fields. For example, consider this JSON:

```json
{
  "type": "person",
  "name": "Mitchell"
}
```

Perhaps we can't populate a specific structure without first reading
the "type" field from the JSON. We could always do two passes over the
decoding of the JSON (reading the "type" first, and the rest later).
However, it is much simpler to just decode this into a `map[string]interface{}`
structure, read the "type" key, then use something like this library
to decode it into the proper structure.
diff --git a/vendor/github.com/ncw/swift/README.md b/vendor/github.com/ncw/swift/README.md
new file mode 100644
index 00000000..2cc24cff
--- /dev/null
+++ b/vendor/github.com/ncw/swift/README.md
@@ -0,0 +1,140 @@
Swift
=====

This package provides an easy-to-use library for interfacing with
Swift / Openstack Object Storage / Rackspace cloud files from the Go
language.

See here for package docs

  http://godoc.org/github.com/ncw/swift

[![Build Status](https://api.travis-ci.org/ncw/swift.svg?branch=master)](https://travis-ci.org/ncw/swift) [![GoDoc](https://godoc.org/github.com/ncw/swift?status.svg)](https://godoc.org/github.com/ncw/swift)

Install
-------

Use go to install the library

    go get github.com/ncw/swift

Usage
-----

See here for full package docs

- http://godoc.org/github.com/ncw/swift

Here is a short example from the docs

    import "github.com/ncw/swift"

    // Create a connection
    c := swift.Connection{
        UserName: "user",
        ApiKey:   "key",
        AuthUrl:  "auth_url",
        Domain:   "domain", // Name of the domain (v3 auth only)
        Tenant:   "tenant", // Name of the tenant (v2 auth only)
    }
    // Authenticate
    err := c.Authenticate()
    if err != nil {
        panic(err)
    }
    // List all the containers
    containers, err := c.ContainerNames(nil)
    fmt.Println(containers)
    // etc...

Additions
---------

The `rs` sub project contains a wrapper for the Rackspace specific CDN Management interface.

Testing
-------

To run the tests you can either use an embedded fake Swift server or use a
real Openstack Swift server or a Rackspace Cloud files account.
+ +When using a real Swift server, you need to set these environment variables +before running the tests + + export SWIFT_API_USER='user' + export SWIFT_API_KEY='key' + export SWIFT_AUTH_URL='https://url.of.auth.server/v1.0' + +And optionally these if using v2 authentication + + export SWIFT_TENANT='TenantName' + export SWIFT_TENANT_ID='TenantId' + +And optionally these if using v3 authentication + + export SWIFT_TENANT='TenantName' + export SWIFT_TENANT_ID='TenantId' + export SWIFT_API_DOMAIN_ID='domain id' + export SWIFT_API_DOMAIN='domain name' + +And optionally these if using v3 trust + + export SWIFT_TRUST_ID='TrustId' + +And optionally this if you want to skip server certificate validation + + export SWIFT_AUTH_INSECURE=1 + +And optionally this to configure the connect channel timeout, in seconds + + export SWIFT_CONNECTION_CHANNEL_TIMEOUT=60 + +And optionally this to configure the data channel timeout, in seconds + + export SWIFT_DATA_CHANNEL_TIMEOUT=60 + +Then run the tests with `go test` + +License +------- + +This is free software under the terms of MIT license (check COPYING file +included in this package). + +Contact and support +------------------- + +The project website is at: + +- https://github.com/ncw/swift + +There you can file bug reports, ask for help or contribute patches. + +Authors +------- + +- Nick Craig-Wood + +Contributors +------------ + +- Brian "bojo" Jones +- Janika Liiv +- Yamamoto, Hirotaka +- Stephen +- platformpurple +- Paul Querna +- Livio Soares +- thesyncim +- lsowen +- Sylvain Baubeau +- Chris Kastorff +- Dai HaoJun +- Hua Wang +- Fabian Ruff +- Arturo Reuschenbach Puncernau +- Petr Kotek +- Stefan Majewsky +- Cezar Sa Espinola +- Sam Gunaratne +- Richard Scothern diff --git a/vendor/github.com/opencontainers/go-digest/LICENSE.code b/vendor/github.com/opencontainers/go-digest/LICENSE.code new file mode 100644 index 00000000..0ea3ff81 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/LICENSE.code @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/opencontainers/go-digest/LICENSE.docs b/vendor/github.com/opencontainers/go-digest/LICENSE.docs new file mode 100644 index 00000000..e26cd4fc --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/LICENSE.docs @@ -0,0 +1,425 @@ +Attribution-ShareAlike 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. 
+ + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. More_considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution-ShareAlike 4.0 International Public +License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution-ShareAlike 4.0 International Public License ("Public +License"). To the extent this Public License may be interpreted as a +contract, You are granted the Licensed Rights in consideration of Your +acceptance of these terms and conditions, and the Licensor grants You +such rights in consideration of benefits the Licensor receives from +making the Licensed Material available under these terms and +conditions. + + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. BY-SA Compatible License means a license listed at + creativecommons.org/compatiblelicenses, approved by Creative + Commons as essentially the equivalent of this Public License. + + d. 
Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + e. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + f. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + g. License Elements means the license attributes listed in the name + of a Creative Commons Public License. The License Elements of this + Public License are Attribution and ShareAlike. + + h. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + i. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + j. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + k. Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + l. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + m. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part; and + + b. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. 
The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. Additional offer from the Licensor -- Adapted Material. + Every recipient of Adapted Material from You + automatically receives an offer from the Licensor to + exercise the Licensed Rights in the Adapted Material + under the conditions of the Adapter's License You apply. + + c. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. 
You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + b. ShareAlike. + + In addition to the conditions in Section 3(a), if You Share + Adapted Material You produce, the following conditions also apply. + + 1. The Adapter's License You apply must be a Creative Commons + license with the same License Elements, this version or + later, or a BY-SA Compatible License. + + 2. You must include the text of, or the URI or hyperlink to, the + Adapter's License You apply. You may satisfy this condition + in any reasonable manner based on the medium, means, and + context in which You Share Adapted Material. + + 3. You may not offer or impose any additional or different terms + or conditions on, or apply any Effective Technological + Measures to, Adapted Material that restrict exercise of the + rights granted under the Adapter's License You apply. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database; + + b. if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material, + + including for purposes of Section 3(b); and + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. 
The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + + +======================================================================= + +Creative Commons is not a party to its public licenses. +Notwithstanding, Creative Commons may elect to apply one of its public +licenses to material it publishes and in those instances will be +considered the "Licensor." 
Except for the limited purpose of indicating
+that material is shared under a Creative Commons public license or as
+otherwise permitted by the Creative Commons policies published at
+creativecommons.org/policies, Creative Commons does not authorize the
+use of the trademark "Creative Commons" or any other trademark or logo
+of Creative Commons without its prior written consent including,
+without limitation, in connection with any unauthorized modifications
+to any of its public licenses or any other arrangements,
+understandings, or agreements concerning use of licensed material. For
+the avoidance of doubt, this paragraph does not form part of the public
+licenses.
+
+Creative Commons may be contacted at creativecommons.org.
diff --git a/vendor/github.com/opencontainers/go-digest/README.md b/vendor/github.com/opencontainers/go-digest/README.md
new file mode 100644
index 00000000..9d6174cf
--- /dev/null
+++ b/vendor/github.com/opencontainers/go-digest/README.md
@@ -0,0 +1,104 @@
+# go-digest
+
+[![GoDoc](https://godoc.org/github.com/docker/go-digest?status.svg)](https://godoc.org/github.com/docker/go-digest) [![Go Report Card](https://goreportcard.com/badge/github.com/docker/go-digest)](https://goreportcard.com/report/github.com/docker/go-digest) [![Build Status](https://travis-ci.org/docker/go-digest.svg?branch=master)](https://travis-ci.org/docker/go-digest)
+
+Common digest package used across the container ecosystem.
+
+Please see the [godoc](https://godoc.org/github.com/docker/go-digest) for more information.
+
+# What is a digest?
+
+A digest is just a hash.
+
+The most common use case for a digest is to create a content
+identifier for use in [Content Addressable Storage](https://en.wikipedia.org/wiki/Content-addressable_storage)
+systems:
+
+```go
+id := digest.FromBytes([]byte("my content"))
+```
+
+In the example above, the id can be used to uniquely identify
+the byte slice "my content". This allows two disparate applications
+to agree on a verifiable identifier without having to trust one
+another.
+
+An identifying digest can be verified, as follows:
+
+```go
+if id != digest.FromBytes([]byte("my content")) {
+    return errors.New("the content has changed!")
+}
+```
+
+A `Verifier` type can be used to handle cases where an `io.Reader`
+makes more sense:
+
+```go
+rd := getContent()
+verifier := id.Verifier()
+io.Copy(verifier, rd)
+
+if !verifier.Verified() {
+    return errors.New("the content has changed!")
+}
+```
+
+Using [Merkle DAGs](https://en.wikipedia.org/wiki/Merkle_tree), this
+can power a rich, safe, content distribution system.
+
+# Usage
+
+While the [godoc](https://godoc.org/github.com/docker/go-digest) is
+considered the best resource, a few important items need to be called
+out when using this package.
+
+1. Make sure to import the hash implementations into your application
+   or the package will panic. You should have something like the
+   following in the main (or other entrypoint) of your application:
+
+   ```go
+   import (
+       _ "crypto/sha256"
+       _ "crypto/sha512"
+   )
+   ```
+   This may seem inconvenient but it allows you to replace the hash
+   implementations with others, such as https://github.com/stevvooe/resumable.
+
+2. Even though `digest.Digest` may be assembled as a string, _always_
+   verify your input with `digest.Parse` or use `Digest.Validate`
+   when accepting untrusted input. While there are measures to
+   avoid common problems, this will ensure you have valid digests
+   in the rest of your application.
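+
+   For example, a minimal sketch of rejecting bad input (the
+   `userInput` variable and the bare `return` are illustrative, not
+   part of this package):
+
+   ```go
+   dgst, err := digest.Parse(userInput) // userInput: an untrusted string
+   if err != nil {
+       return err // not a well-formed digest; reject it
+   }
+   // dgst is now safe to use in the rest of the application.
+   ```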
+ +# Stability + +The Go API, at this stage, is considered stable, unless otherwise noted. + +As always, before using a package export, read the [godoc](https://godoc.org/github.com/docker/go-digest). + +# Contributing + +This package is considered fairly complete. It has been in production +in thousands (millions?) of deployments and is fairly battle-hardened. +New additions will be met with skepticism. If you think there is a +missing feature, please file a bug clearly describing the problem and +the alternatives you tried before submitting a PR. + +# Reporting security issues + +The maintainers take security seriously. If you discover a security +issue, please bring it to their attention right away! + +Please DO NOT file a public issue, instead send your report privately +to security@docker.com. + +Security reports are greatly appreciated and we will publicly thank you +for it. We also like to send gifts—if you're into Docker schwag, make +sure to let us know. We currently do not offer a paid security bounty +program, but are not ruling it out in the future. + +# Copyright and license + +Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code is released under the [Apache 2.0 license](LICENSE.code). This `README.md` file and the [`CONTRIBUTING.md`](CONTRIBUTING.md) file are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file [`LICENSE.docs`](LICENSE.docs). You may obtain a duplicate copy of the same license, titled CC BY-SA 4.0, at http://creativecommons.org/licenses/by-sa/4.0/. diff --git a/vendor/github.com/spf13/cobra/LICENSE.txt b/vendor/github.com/spf13/cobra/LICENSE.txt new file mode 100644 index 00000000..298f0e26 --- /dev/null +++ b/vendor/github.com/spf13/cobra/LICENSE.txt @@ -0,0 +1,174 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md
new file mode 100644
index 00000000..b1fb0889
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/README.md
@@ -0,0 +1,485 @@
+# Cobra
+
+A Commander for modern Go CLI interactions
+
+[![Build Status](https://travis-ci.org/spf13/cobra.svg)](https://travis-ci.org/spf13/cobra)
+
+## Overview
+
+Cobra is a commander providing a simple interface to create powerful modern CLI
+interfaces similar to git & go tools. In addition to providing an interface, Cobra
+simultaneously provides a controller to organize your application code.
+
+Inspired by go, go-Commander, gh and subcommand, Cobra improves on these by
+providing **fully POSIX-compliant flags** (including short & long versions),
+**nesting commands**, and the ability to **define your own help and usage** for any or
+all commands.
+
+Cobra has an exceptionally clean interface and simple design without needless
+constructors or initialization methods.
+
+Applications built with Cobra commands are designed to be as user-friendly as
+possible. Flags can be placed before or after the command (as long as a
+confusing space isn’t provided). Both short and long flags can be used. A
+command need not even be fully typed. The shortest unambiguous string will
+suffice. Help is automatically generated and available for the application or
+for a specific command using either the help command or the --help flag.
+
+## Concepts
+
+Cobra is built on a structure of commands & flags.
+
+**Commands** represent actions and **Flags** are modifiers for those actions.
+
+In the following example, 'server' is a command and 'port' is a flag.
+
+    hugo server --port=1313
+
+### Commands
+
+Command is the central point of the application. Each interaction that
+the application supports will be contained in a Command. A command can
+have child commands and optionally run an action.
+
+In the example above, 'server' is the command.
+
+A Command has the following structure:
+
+    type Command struct {
+        Use string // The one-line usage message.
+        Short string // The short description shown in the 'help' output.
+
+        Long string // The long message shown in the 'help <this-command>' output.
+        Run func(cmd *Command, args []string) // Run runs the command.
+    }
+
+### Flags
+
+A Flag is a way to modify the behavior of a command. Cobra supports
+fully POSIX-compliant flags as well as the Go flag package.
+A Cobra command can define flags that persist through to child commands
+and flags that are only available to that command.
+
+In the example above, 'port' is the flag.
+
+Flag functionality is provided by the [pflag
+library](https://github.com/ogier/pflag), a fork of the flag standard library
+which maintains the same interface while adding POSIX compliance.
+
+## Usage
+
+Cobra works by creating a set of commands and then organizing them into a tree.
+The tree defines the structure of the application.
+
+Once each command is defined with its corresponding flags, the
+tree is assigned to the commander, which is finally executed.
+
+### Installing
+Using Cobra is easy. First, use go get to install the latest version
+of the library.
+
+    $ go get github.com/spf13/cobra
+
+Next, include Cobra in your application.
+
+    import "github.com/spf13/cobra"
+
+### Create the root command
+
+The root command represents your binary itself.
+
+Cobra doesn't require any special constructors. Simply create your commands.
+
+    var HugoCmd = &cobra.Command{
+        Use:   "hugo",
+        Short: "Hugo is a very fast static site generator",
+        Long: `A Fast and Flexible Static Site Generator built with
+                love by spf13 and friends in Go.
+                Complete documentation is available at http://hugo.spf13.com`,
+        Run: func(cmd *cobra.Command, args []string) {
+            // Do Stuff Here
+        },
+    }
+
+### Create additional commands
+
+Additional commands can be defined.
+
+    var versionCmd = &cobra.Command{
+        Use:   "version",
+        Short: "Print the version number of Hugo",
+        Long:  `All software has versions. This is Hugo's`,
+        Run: func(cmd *cobra.Command, args []string) {
+            fmt.Println("Hugo Static Site Generator v0.9 -- HEAD")
+        },
+    }
+
+### Attach command to its parent
+In this example we are attaching it to the root, but commands can be attached at any level.
+
+    HugoCmd.AddCommand(versionCmd)
+
+### Assign flags to a command
+
+Since the flags are defined and used in different locations, we need to
+define a variable with the appropriate scope in which to store the
+flag's value.
+
+    var Verbose bool
+    var Source string
+
+There are two different approaches to assign a flag.
+
+#### Persistent Flags
+
+A flag can be 'persistent', meaning that this flag will be available to the
+command it's assigned to as well as every command under that command. For
+global flags, assign the flag as a persistent flag on the root.
+
+    HugoCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output")
+
+#### Local Flags
+
+A flag can also be assigned locally, in which case it will only apply to that specific command.
+
+    HugoCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from")
+
+### Remove a command from its parent
+
+Removing a command is not a common action in simple programs, but it allows third parties to customize an existing command tree.
+
+In this example, we remove the existing `VersionCmd` command from an existing root command and replace it with our own version.
+
+    mainlib.RootCmd.RemoveCommand(mainlib.VersionCmd)
+    mainlib.RootCmd.AddCommand(versionCmd)
+
+### Once all commands and flags are defined, Execute the commands
+
+Execute should be run on the root for clarity, though it can be called on any command.
+
+    HugoCmd.Execute()
+
+## Example
+
+In the example below, we have defined three commands. Two are at the top level
+and one (cmdTimes) is a child of one of the top commands. In this case, the root
+is not executable, meaning that a subcommand is required. This is accomplished
+by not providing a 'Run' for the 'rootCmd'.
+
+We have only defined one flag for a single command.
+
+More documentation about flags is available at https://github.com/spf13/pflag.
+
+    import (
+        "github.com/spf13/cobra"
+        "fmt"
+        "strings"
+    )
+
+    func main() {
+
+        var echoTimes int
+
+        var cmdPrint = &cobra.Command{
+            Use:   "print [string to print]",
+            Short: "Print anything to the screen",
+            Long: `print is for printing anything back to the screen.
+            For many years people have printed back to the screen.
+            `,
+            Run: func(cmd *cobra.Command, args []string) {
+                fmt.Println("Print: " + strings.Join(args, " "))
+            },
+        }
+
+        var cmdEcho = &cobra.Command{
+            Use:   "echo [string to echo]",
+            Short: "Echo anything to the screen",
+            Long: `echo is for echoing anything back.
+            Echo works a lot like print, except it has a child command.
+            `,
+            Run: func(cmd *cobra.Command, args []string) {
+                fmt.Println("Print: " + strings.Join(args, " "))
+            },
+        }
+
+        var cmdTimes = &cobra.Command{
+            Use:   "times [# times] [string to echo]",
+            Short: "Echo anything to the screen more times",
+            Long: `echo things multiple times back to the user by providing
+            a count and a string.`,
+            Run: func(cmd *cobra.Command, args []string) {
+                for i := 0; i < echoTimes; i++ {
+                    fmt.Println("Echo: " + strings.Join(args, " "))
+                }
+            },
+        }
+
+        cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input")
+
+        var rootCmd = &cobra.Command{Use: "app"}
+        rootCmd.AddCommand(cmdPrint, cmdEcho)
+        cmdEcho.AddCommand(cmdTimes)
+        rootCmd.Execute()
+    }
+
+For a more complete example of a larger application, please check out [Hugo](http://hugo.spf13.com).
+
+## The Help Command
+
+Cobra automatically adds a help command to your application when you have subcommands.
+This will be called when a user runs 'app help'. Additionally, help will also
+support all other commands as input. Say, for instance, you have a command called
+'create'; without any additional configuration, Cobra will work when 'app help
+create' is called. Every command will automatically have the '--help' flag added.
+
+### Example
+
+The following output is automatically generated by Cobra. Nothing beyond the
+command and flag definitions is needed.
+
+    > hugo help
+
+    A Fast and Flexible Static Site Generator built with
+    love by spf13 and friends in Go.
+
+    Complete documentation is available at http://hugo.spf13.com
+
+    Usage:
+      hugo [flags]
+      hugo [command]
+
+    Available Commands:
+      server          :: Hugo runs its own webserver to render the files
+      version         :: Print the version number of Hugo
+      check           :: Check content in the source directory
+      benchmark       :: Benchmark hugo by building a site a number of times
+      help [command]  :: Help about any command
+
+    Available Flags:
+      -b, --base-url="": hostname (and path) to the root eg.
http://spf13.com/
+      -D, --build-drafts=false: include content marked as draft
+          --config="": config file (default is path/config.yaml|json|toml)
+      -d, --destination="": filesystem path to write files to
+      -s, --source="": filesystem path to read files relative from
+          --stepAnalysis=false: display memory and timing of different steps of the program
+          --uglyurls=false: if true, use /filename.html instead of /filename/
+      -v, --verbose=false: verbose output
+      -w, --watch=false: watch filesystem for changes and recreate as needed
+
+    Use "hugo help [command]" for more information about that command.
+
+
+
+Help is just a command like any other. There is no special logic or behavior
+around it. In fact, you can provide your own if you want.
+
+### Defining your own help
+
+You can provide your own Help command or your own template for the default command to use.
+
+The default help command is
+
+    func (c *Command) initHelp() {
+        if c.helpCommand == nil {
+            c.helpCommand = &Command{
+                Use:   "help [command]",
+                Short: "Help about any command",
+                Long: `Help provides help for any command in the application.
+        Simply type ` + c.Name() + ` help [path to command] for full details.`,
+                Run: c.HelpFunc(),
+            }
+        }
+        c.AddCommand(c.helpCommand)
+    }
+
+You can provide your own command, function or template through the following methods.
+
+    command.SetHelpCommand(cmd *Command)
+
+    command.SetHelpFunc(f func(*Command, []string))
+
+    command.SetHelpTemplate(s string)
+
+The latter two will also apply to any child commands.
+
+## Usage
+
+When the user provides an invalid flag or invalid command, Cobra responds by
+showing the user the 'usage'.
+
+### Example
+You may recognize this from the help above. That's because the default help
+embeds the usage as part of its output.
+
+    Usage:
+      hugo [flags]
+      hugo [command]
+
+    Available Commands:
+      server          Hugo runs its own webserver to render the files
+      version         Print the version number of Hugo
+      check           Check content in the source directory
+      benchmark       Benchmark hugo by building a site a number of times
+      help [command]  Help about any command
+
+    Available Flags:
+      -b, --base-url="": hostname (and path) to the root eg. http://spf13.com/
+      -D, --build-drafts=false: include content marked as draft
+          --config="": config file (default is path/config.yaml|json|toml)
+      -d, --destination="": filesystem path to write files to
+      -s, --source="": filesystem path to read files relative from
+          --stepAnalysis=false: display memory and timing of different steps of the program
+          --uglyurls=false: if true, use /filename.html instead of /filename/
+      -v, --verbose=false: verbose output
+      -w, --watch=false: watch filesystem for changes and recreate as needed
+
+### Defining your own usage
+You can provide your own usage function or template for Cobra to use.
+
+The default usage function is
+
+    return func(c *Command) error {
+        err := tmpl(c.Out(), c.UsageTemplate(), c)
+        return err
+    }
+
+Like help, the function and template are overridable through public methods.
+
+    command.SetUsageFunc(f func(*Command) error)
+
+    command.SetUsageTemplate(s string)
+
+## PreRun or PostRun Hooks
+
+It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own.
These functions are run in the following order:
+
+- `PersistentPreRun`
+- `PreRun`
+- `Run`
+- `PostRun`
+- `PersistentPostRun`
+
+An example of two commands which use all of these features is below. When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`.
+
+```go
+package main
+
+import (
+    "fmt"
+
+    "github.com/spf13/cobra"
+)
+
+func main() {
+
+    var rootCmd = &cobra.Command{
+        Use:   "root [sub]",
+        Short: "My root command",
+        PersistentPreRun: func(cmd *cobra.Command, args []string) {
+            fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args)
+        },
+        PreRun: func(cmd *cobra.Command, args []string) {
+            fmt.Printf("Inside rootCmd PreRun with args: %v\n", args)
+        },
+        Run: func(cmd *cobra.Command, args []string) {
+            fmt.Printf("Inside rootCmd Run with args: %v\n", args)
+        },
+        PostRun: func(cmd *cobra.Command, args []string) {
+            fmt.Printf("Inside rootCmd PostRun with args: %v\n", args)
+        },
+        PersistentPostRun: func(cmd *cobra.Command, args []string) {
+            fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args)
+        },
+    }
+
+    var subCmd = &cobra.Command{
+        Use:   "sub [no options!]",
+        Short: "My sub command",
+        PreRun: func(cmd *cobra.Command, args []string) {
+            fmt.Printf("Inside subCmd PreRun with args: %v\n", args)
+        },
+        Run: func(cmd *cobra.Command, args []string) {
+            fmt.Printf("Inside subCmd Run with args: %v\n", args)
+        },
+        PostRun: func(cmd *cobra.Command, args []string) {
+            fmt.Printf("Inside subCmd PostRun with args: %v\n", args)
+        },
+        PersistentPostRun: func(cmd *cobra.Command, args []string) {
+            fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args)
+        },
+    }
+
+    rootCmd.AddCommand(subCmd)
+
+    rootCmd.SetArgs([]string{""})
+    _ = rootCmd.Execute()
+    fmt.Print("\n")
+    rootCmd.SetArgs([]string{"sub", "arg1", "arg2"})
+    _ = rootCmd.Execute()
+}
+```
+
+## Generating markdown-formatted documentation for your command
+
+Cobra can generate a markdown-formatted document based on the subcommands, flags, etc. A simple example of how to do this for your command can be found in [Markdown Docs](md_docs.md).
+
+## Generating bash completions for your command
+
+Cobra can generate a bash completions file. If you add more information to your command, these completions can be amazingly powerful and flexible. Read more about [Bash Completions](bash_completions.md).
+
+## Debugging
+
+Cobra provides a ‘DebugFlags’ method on a command which, when called, will print
+out everything Cobra knows about the flags for each command.
+
+### Example
+
+    command.DebugFlags()
+
+## Release Notes
+* **0.9.0** June 17, 2014
+  * flags can appear anywhere in the args (provided they are unambiguous)
+  * --help prints usage screen for app or command
+  * Prefix matching for commands
+  * Cleaner looking help and usage output
+  * Extensive test suite
+* **0.8.0** Nov 5, 2013
+  * Reworked interface to remove commander completely
+  * Command now primary structure
+  * No initialization needed
+  * Usage & Help templates & functions definable at any level
+  * Updated Readme
+* **0.7.0** Sept 24, 2013
+  * Needs more eyes
+  * Test suite
+  * Support for automatic error messages
+  * Support for help command
+  * Support for printing to any io.Writer instead of os.Stderr
+  * Support for persistent flags which cascade down tree
+  * Ready for integration into Hugo
+* **0.1.0** Sept 3, 2013
+  * Implement first draft
+
+## ToDo
+* Launch proper documentation site
+
+## Contributing
+
+1. Fork it
+2.
Create your feature branch (`git checkout -b my-new-feature`)
+3. Commit your changes (`git commit -am 'Add some feature'`)
+4. Push to the branch (`git push origin my-new-feature`)
+5. Create a new Pull Request
+
+## Contributors
+
+Names in no particular order:
+
+* [spf13](https://github.com/spf13)
+
+## License
+
+Cobra is released under the Apache 2.0 license. See [LICENSE.txt](https://github.com/spf13/cobra/blob/master/LICENSE.txt)
+
+
+[![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/spf13/cobra/trend.png)](https://bitdeli.com/free "Bitdeli Badge")
+
diff --git a/vendor/github.com/spf13/pflag/README.md b/vendor/github.com/spf13/pflag/README.md
new file mode 100644
index 00000000..f7d63500
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/README.md
@@ -0,0 +1,191 @@
+[![Build Status](https://travis-ci.org/spf13/pflag.svg?branch=master)](https://travis-ci.org/spf13/pflag)
+
+## Description
+
+pflag is a drop-in replacement for Go's flag package, implementing
+POSIX/GNU-style --flags.
+
+pflag is compatible with the [GNU extensions to the POSIX recommendations
+for command-line options][1]. For a more precise description, see the
+"Command-line flag syntax" section below.
+
+[1]: http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html
+
+pflag is available under the same style of BSD license as the Go language,
+which can be found in the LICENSE file.
+
+## Installation
+
+pflag is available using the standard `go get` command.
+
+Install by running:
+
+    go get github.com/ogier/pflag
+
+Run tests by running:
+
+    go test github.com/ogier/pflag
+
+## Usage
+
+pflag is a drop-in replacement for Go's native flag package. If you import
+pflag under the name "flag", then all code should continue to function
+with no changes.
+
+``` go
+import flag "github.com/ogier/pflag"
+```
+
+There is one exception to this: if you directly instantiate the Flag struct,
+there is one more field "Shorthand" that you will need to set.
+Most code never instantiates this struct directly, and instead uses
+functions such as String(), BoolVar(), and Var(), and is therefore
+unaffected.
+
+Define flags using flag.String(), Bool(), Int(), etc.
+
+This declares an integer flag, -flagname, stored in the pointer ip, with type *int.
+
+``` go
+var ip *int = flag.Int("flagname", 1234, "help message for flagname")
+```
+
+If you like, you can bind the flag to a variable using the Var() functions.
+
+``` go
+var flagvar int
+func init() {
+    flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
+}
+```
+
+Or you can create custom flags that satisfy the Value interface (with
+pointer receivers) and couple them to flag parsing by
+
+``` go
+flag.Var(&flagVal, "name", "help message for flagname")
+```
+
+For such flags, the default value is just the initial value of the variable.
+
+After all flags are defined, call
+
+``` go
+flag.Parse()
+```
+
+to parse the command line into the defined flags.
+
+Flags may then be used directly. If you're using the flags themselves,
+they are all pointers; if you bind to variables, they're values.
+
+``` go
+fmt.Println("ip has value ", *ip)
+fmt.Println("flagvar has value ", flagvar)
+```
+
+After parsing, the arguments after the flags are available as the
+slice flag.Args() or individually as flag.Arg(i).
+The arguments are indexed from 0 through flag.NArg()-1.
+
+The pflag package also defines some new functions that are not in flag,
+that give one-letter shorthands for flags.
You can use these by appending
+'P' to the name of any function that defines a flag.
+
+``` go
+var ip = flag.IntP("flagname", "f", 1234, "help message")
+var flagvar bool
+func init() {
+    flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
+}
+// VarP takes no default value: the Value's initial state is the default.
+flag.VarP(&flagVal, "varname", "v", "help message")
+```
+
+Shorthand letters can be used with single dashes on the command line.
+Boolean shorthand flags can be combined with other shorthand flags.
+
+The default set of command-line flags is controlled by
+top-level functions. The FlagSet type allows one to define
+independent sets of flags, such as to implement subcommands
+in a command-line interface. The methods of FlagSet are
+analogous to the top-level functions for the command-line
+flag set.
+
+## Command line flag syntax
+
+```
+--flag    // boolean flags only
+--flag=x
+```
+
+Unlike the flag package, a single dash before an option means something
+different than a double dash. Single dashes signify a series of shorthand
+letters for flags. All but the last shorthand letter must be boolean flags.
+
+```
+// boolean flags
+-f
+-abc
+
+// non-boolean flags
+-n 1234
+-Ifile
+
+// mixed
+-abcs "hello"
+-abcn1234
+```
+
+Flag parsing stops after the terminator "--". Unlike the flag package,
+flags can be interspersed with arguments anywhere on the command line
+before this terminator.
+
+Integer flags accept 1234, 0664, 0x1234 and may be negative.
+Boolean flags (in their long form) accept 1, 0, t, f, true, false,
+TRUE, FALSE, True, False.
+Duration flags accept any input valid for time.ParseDuration.
+
+## Mutating or "Normalizing" Flag names
+
+It is possible to set a custom flag name 'normalization function.' It allows flag names to be mutated both when created in the code and when used on the command line to some 'normalized' form. The 'normalized' form is used for comparison. Two examples of using the custom normalization func follow.
+
+**Example #1**: You want -, _, and . in flags to compare the same. aka --my-flag == --my_flag == --my.flag
+
+```go
+func wordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
+    from := []string{"-", "_"}
+    to := "."
+    for _, sep := range from {
+        name = strings.Replace(name, sep, to, -1)
+    }
+    return pflag.NormalizedName(name)
+}
+
+myFlagSet.SetNormalizeFunc(wordSepNormalizeFunc)
+```
+
+**Example #2**: You want to alias two flags. aka --old-flag-name == --new-flag-name
+
+```go
+func aliasNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
+    switch name {
+    case "old-flag-name":
+        name = "new-flag-name"
+        break
+    }
+    return pflag.NormalizedName(name)
+}
+
+myFlagSet.SetNormalizeFunc(aliasNormalizeFunc)
+```
+
+## More info
+
+You can see the full reference documentation of the pflag package
+[at godoc.org][3], or through Go's standard documentation system by
+running `godoc -http=:6060` and browsing to
+[http://localhost:6060/pkg/github.com/ogier/pflag][2] after
+installation.
+
+[2]: http://localhost:6060/pkg/github.com/ogier/pflag
+[3]: http://godoc.org/github.com/ogier/pflag
diff --git a/vendor/github.com/stevvooe/resumable/README.md b/vendor/github.com/stevvooe/resumable/README.md
new file mode 100644
index 00000000..d2d3fb89
--- /dev/null
+++ b/vendor/github.com/stevvooe/resumable/README.md
@@ -0,0 +1,6 @@
+# go-crypto
+A Subset of the Go `crypto` Package with a Resumable Hash Interface
+
+### Documentation
+
+GoDocs: http://godoc.org/github.com/stevvooe/resumable
diff --git a/vendor/github.com/xenolf/lego/README.md b/vendor/github.com/xenolf/lego/README.md
new file mode 100644
index 00000000..ff692132
--- /dev/null
+++ b/vendor/github.com/xenolf/lego/README.md
@@ -0,0 +1,248 @@
+# lego
+Let's Encrypt client and ACME library written in Go
+
+[![GoDoc](https://godoc.org/github.com/xenolf/lego/acme?status.svg)](https://godoc.org/github.com/xenolf/lego/acme)
+[![Build Status](https://travis-ci.org/xenolf/lego.svg?branch=master)](https://travis-ci.org/xenolf/lego)
+[![Dev Chat](https://img.shields.io/badge/dev%20chat-gitter-blue.svg?label=dev+chat)](https://gitter.im/xenolf/lego)
+
+#### General
+This is a work in progress. Please do *NOT* run this on a production server and please report any bugs you find!
+
+#### Installation
+lego supports both binary installs and installation from source.
+
+To get the binary, just download the latest release for your OS/Arch from [the release page](https://github.com/xenolf/lego/releases)
+and put the binary somewhere convenient. lego does not assume anything about the location you run it from.
+
+To install from source, just run
+```
+go get -u github.com/xenolf/lego
+```
+
+To build lego inside a Docker container, just run
+```
+docker build -t lego .
+```
+
+#### Features
+
+- Register with CA
+- Obtain certificates
+- Renew certificates
+- Revoke certificates
+- Robust implementation of all ACME challenges
+  - HTTP (http-01)
+  - TLS with Server Name Indication (tls-sni-01)
+  - DNS (dns-01)
+- SAN certificate support
+- Comes with multiple optional [DNS providers](https://github.com/xenolf/lego/tree/master/providers/dns)
+- [Custom challenge solvers](https://github.com/xenolf/lego/wiki/Writing-a-Challenge-Solver)
+- Certificate bundling
+- OCSP helper function
+
+Please keep in mind that CLI switches and APIs are still subject to change.
+
+When using the standard `--path` option, all certificates and account configurations are saved to a folder *.lego* in the current working directory.
+
+#### Sudo
+The CLI does not require root permissions but needs to bind to ports 80 and 443 for certain challenges.
+To run the CLI without sudo, you have four options:
+
+- Use setcap 'cap_net_bind_service=+ep' /path/to/program
+- Pass the `--http` and/or the `--tls` option and specify a custom port to bind to. In this case, you have to forward ports 80/443 to these custom ports (see [Port Usage](#port-usage)).
+- Pass the `--webroot` option and specify the path to your webroot folder. In this case, the challenge will be written to a file in `.well-known/acme-challenge/` inside your webroot.
+- Pass the `--dns` option and specify a DNS provider.
+
+#### Port Usage
+By default lego assumes it is able to bind to ports 80 and 443 to solve challenges.
+If this is not possible in your environment, you can use the `--http` and `--tls` options to instruct
+lego to listen on that interface:port for any incoming challenges.
+
+If you are using this option, make sure you proxy all of the following traffic to these ports.
+
+HTTP Port:
+- All plaintext HTTP requests to port 80 which begin with a request path of `/.well-known/acme-challenge/` for the HTTP challenge.
+
+TLS Port:
+- All TLS handshakes on port 443 for the TLS-SNI challenge.
+
+This traffic redirection is only needed as long as lego solves challenges. As soon as you have received your certificates, you can deactivate the forwarding.
+
+#### Usage
+
+```
+NAME:
+   lego - Let's Encrypt client written in Go
+
+USAGE:
+   lego [global options] command [command options] [arguments...]
+
+VERSION:
+   0.3.0
+
+COMMANDS:
+     run      Register an account, then create and install a certificate
+     revoke   Revoke a certificate
+     renew    Renew a certificate
+     dnshelp  Shows additional help for the --dns global option
+     help, h  Shows a list of commands or help for one command
+
+GLOBAL OPTIONS:
+   --domains, -d [--domains option --domains option]  Add domains to the process
+   --server, -s "https://acme-v01.api.letsencrypt.org/directory"  CA hostname (and optionally :port). The server certificate must be trusted in order to avoid further modifications to the client.
+   --email, -m  Email used for registration and recovery contact.
+   --accept-tos, -a  By setting this flag to true you indicate that you accept the current Let's Encrypt terms of service.
+   --key-type, -k "rsa2048"  Key type to use for private keys. Supported: rsa2048, rsa4096, rsa8192, ec256, ec384
+   --path "${CWD}/.lego"  Directory to use for storing the data
+   --exclude, -x [--exclude option --exclude option]  Explicitly disallow solvers by name from being used. Solvers: "http-01", "tls-sni-01".
+   --webroot  Set the webroot folder to use for HTTP based challenges to write directly in a file in .well-known/acme-challenge
+   --http  Set the port and interface to use for HTTP based challenges to listen on. Supported: interface:port or :port
+   --tls  Set the port and interface to use for TLS based challenges to listen on. Supported: interface:port or :port
+   --dns  Solve a DNS challenge using the specified provider. Disables all other challenges. Run 'lego dnshelp' for help on usage.
+   --help, -h  show help
+   --version, -v  print the version
+```
+
+##### CLI Example
+
+Assumes the `lego` binary has permission to bind to ports 80 and 443. You can get a pre-built binary from the [releases](https://github.com/xenolf/lego/releases) page.
+If your environment does not allow you to bind to these ports, please read [Port Usage](#port-usage).
+
+Obtain a certificate:
+
+```bash
+$ lego --email="foo@bar.com" --domains="example.com" run
+```
+
+(Find your certificate in the `.lego` folder of the current working directory.)
+
+To renew the certificate:
+
+```bash
+$ lego --email="foo@bar.com" --domains="example.com" renew
+```
+
+Obtain a certificate using the DNS challenge and AWS Route 53:
+
+```bash
+$ AWS_REGION=us-east-1 AWS_ACCESS_KEY_ID=my_id AWS_SECRET_ACCESS_KEY=my_key lego --email="foo@bar.com" --domains="example.com" --dns="route53" run
+```
+
+Note that `--dns=foo` implies `--exclude=http-01` and `--exclude=tls-sni-01`. lego will not attempt other challenges if you've told it to use DNS instead.
+
+lego defaults to communicating with the production Let's Encrypt ACME server.
If you'd like to test something without issuing real certificates, consider using the staging endpoint instead:
+
+```bash
+$ lego --server=https://acme-staging.api.letsencrypt.org/directory …
+```
+
+#### DNS Challenge API Details
+
+##### AWS Route 53
+
+The following AWS IAM policy document describes the permissions required for lego to complete the DNS challenge.
+Replace `<HOSTED_ZONE_ID>` with the Route 53 zone ID of the domain you are authorizing.
+
+```json
+{
+    "Version": "2012-10-17",
+    "Statement": [
+        {
+            "Effect": "Allow",
+            "Action": [
+                "route53:GetChange",
+                "route53:ListHostedZonesByName"
+            ],
+            "Resource": [
+                "*"
+            ]
+        },
+        {
+            "Effect": "Allow",
+            "Action": [
+                "route53:ChangeResourceRecordSets"
+            ],
+            "Resource": [
+                "arn:aws:route53:::hostedzone/<HOSTED_ZONE_ID>"
+            ]
+        }
+    ]
+}
+```
+
+#### ACME Library Usage
+
+A valid, but bare-bones example use of the acme package:
+
+```go
+// You'll need a user or account type that implements acme.User
+type MyUser struct {
+    Email        string
+    Registration *acme.RegistrationResource
+    key          crypto.PrivateKey
+}
+func (u MyUser) GetEmail() string {
+    return u.Email
+}
+func (u MyUser) GetRegistration() *acme.RegistrationResource {
+    return u.Registration
+}
+func (u MyUser) GetPrivateKey() crypto.PrivateKey {
+    return u.key
+}
+
+// Create a user. New accounts need an email and private key to start.
+const rsaKeySize = 2048
+privateKey, err := rsa.GenerateKey(rand.Reader, rsaKeySize)
+if err != nil {
+    log.Fatal(err)
+}
+myUser := MyUser{
+    Email: "you@yours.com",
+    key:   privateKey,
+}
+
+// A client facilitates communication with the CA server. This CA URL is
+// configured for a local dev instance of Boulder running in Docker in a VM.
+client, err := acme.NewClient("http://192.168.99.100:4000", &myUser, acme.RSA2048)
+if err != nil {
+    log.Fatal(err)
+}
+
+// We specify an HTTP port of 5002 and a TLS port of 5001 on all interfaces
+// because we aren't running as root and can't bind a listener to ports 80 and 443
+// (used later when we attempt to pass challenges). Keep in mind that we still
+// need to proxy challenge traffic to ports 5002 and 5001.
+client.SetHTTPAddress(":5002")
+client.SetTLSAddress(":5001")
+
+// New users will need to register
+reg, err := client.Register()
+if err != nil {
+    log.Fatal(err)
+}
+myUser.Registration = reg
+
+// SAVE THE USER.
+
+// The client has a URL to the current Let's Encrypt Subscriber
+// Agreement. The user will need to agree to it.
+err = client.AgreeToTOS()
+if err != nil {
+    log.Fatal(err)
+}
+
+// The acme library takes care of completing the challenges to obtain the certificate(s).
+// The domains must resolve to this machine or you have to use the DNS challenge.
+bundle := false
+certificates, failures := client.ObtainCertificate([]string{"mydomain.com"}, bundle, nil)
+if len(failures) > 0 {
+    log.Fatal(failures)
+}
+
+// Each certificate comes back with the cert bytes, the bytes of the client's
+// private key, and a certificate URL. SAVE THESE TO DISK.
+fmt.Printf("%#v\n", certificates)
+
+// ... all done.
+```
diff --git a/vendor/github.com/yvasiyarov/go-metrics/README.md b/vendor/github.com/yvasiyarov/go-metrics/README.md
new file mode 100644
index 00000000..e0091a4b
--- /dev/null
+++ b/vendor/github.com/yvasiyarov/go-metrics/README.md
@@ -0,0 +1,104 @@
+go-metrics
+==========
+
+Go port of Coda Hale's Metrics library.
+
+Documentation: <http://godoc.org/github.com/rcrowley/go-metrics>.
+
+Usage
+-----
+
+Create and update metrics:
+
+```go
+c := metrics.NewCounter()
+metrics.Register("foo", c)
+c.Inc(47)
+
+g := metrics.NewGauge()
+metrics.Register("bar", g)
+g.Update(47)
+
+s := metrics.NewExpDecaySample(1028, 0.015) // or metrics.NewUniformSample(1028)
+h := metrics.NewHistogram(s)
+metrics.Register("baz", h)
+h.Update(47)
+
+m := metrics.NewMeter()
+metrics.Register("quux", m)
+m.Mark(47)
+
+t := metrics.NewTimer()
+metrics.Register("bang", t)
+t.Time(func() {})
+t.Update(47)
+```
+
+Periodically log every metric in human-readable form to standard error:
+
+```go
+go metrics.Log(metrics.DefaultRegistry, 60e9, log.New(os.Stderr, "metrics: ", log.Lmicroseconds))
+```
+
+Periodically log every metric in slightly-more-parseable form to syslog:
+
+```go
+w, _ := syslog.Dial("unixgram", "/dev/log", syslog.LOG_INFO, "metrics")
+go metrics.Syslog(metrics.DefaultRegistry, 60e9, w)
+```
+
+Periodically emit every metric to Graphite:
+
+```go
+addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2003")
+go metrics.Graphite(metrics.DefaultRegistry, 10e9, "metrics", addr)
+```
+
+Periodically emit every metric into InfluxDB:
+
+```go
+import "github.com/rcrowley/go-metrics/influxdb"
+
+go influxdb.Influxdb(metrics.DefaultRegistry, 10e9, &influxdb.Config{
+    Host:     "127.0.0.1:8086",
+    Database: "metrics",
+    Username: "test",
+    Password: "test",
+})
+```
+
+Periodically upload every metric to Librato:
+
+```go
+import "github.com/rcrowley/go-metrics/librato"
+
+go librato.Librato(metrics.DefaultRegistry,
+    10e9,                  // interval
+    "example@example.com", // account owner email address
+    "token",               // Librato API token
+    "hostname",            // source
+    []float64{0.95},       // percentiles to send
+    time.Millisecond,      // time unit
+)
+```
+
+Periodically emit every metric to StatHat:
+
+```go
+import "github.com/rcrowley/go-metrics/stathat"
+
+go stathat.Stathat(metrics.DefaultRegistry, 10e9, "example@example.com")
+```
+
+Installation
+------------
+
+```sh
+go get github.com/rcrowley/go-metrics
+```
+
+StatHat support additionally requires their Go client:
+
+```sh
+go get github.com/stathat/go
+```
diff --git a/vendor/github.com/yvasiyarov/gorelic/README.md b/vendor/github.com/yvasiyarov/gorelic/README.md
new file mode 100644
index 00000000..61068a82
--- /dev/null
+++ b/vendor/github.com/yvasiyarov/gorelic/README.md
@@ -0,0 +1,119 @@
+# GoRelic
+
+New Relic agent for the Go runtime. It collects many metrics about the scheduler, garbage collector and memory allocator and
+sends them to NewRelic.
+
+### Requirements
+- Go 1.1 or higher
+- github.com/yvasiyarov/gorelic
+- github.com/yvasiyarov/newrelic_platform_go
+- github.com/yvasiyarov/go-metrics
+
+Only the first two dependencies have to be installed manually. All other dependencies will be installed automatically
+by the Go toolchain.
+
+### Installation
+```bash
+go get github.com/yvasiyarov/gorelic
+```
+and add the following code to the initialization part of your application:
+```go
+import (
+    "github.com/yvasiyarov/gorelic"
+)
+....
+
+agent := gorelic.NewAgent()
+agent.Verbose = true
+agent.NewrelicLicense = "YOUR NEWRELIC LICENSE KEY THERE"
+agent.Run()
+
+```
+
+### Middleware
+If you are using the Beego, Martini, Revel or Gin framework, you can hook gorelic up to your application by using the following middleware:
+- https://github.com/yvasiyarov/beego_gorelic
+- https://github.com/yvasiyarov/martini_gorelic
+- https://github.com/yvasiyarov/gocraft_gorelic
+- http://wiki.colar.net/revel_newelic
+- https://github.com/jingweno/negroni-gorelic
+- https://github.com/brandfolder/gin-gorelic
+
+
+### Configuration
+- NewrelicLicense - the only mandatory setting of this agent.
+- NewrelicName - component name in the NewRelic dashboard. Default value: "Go daemon"
+- NewrelicPollInterval - how often metrics will be sent to NewRelic. Default value: 60 seconds
+- Verbose - print some information useful for debugging. Default value: false
+- CollectGcStat - whether the agent should collect garbage collector statistics. Default value: true
+- CollectHTTPStat - whether the agent should collect HTTP metrics. Default value: false
+- CollectMemoryStat - whether the agent should collect memory allocator statistics. Default value: true
+- GCPollInterval - how often GC statistics should be collected. Default value: 10 seconds. It has a performance impact. For more information, please see the metrics documentation.
+- MemoryAllocatorPollInterval - how often memory allocator statistics should be collected. Default value: 60 seconds. It has a performance impact. For more information, please read the metrics documentation.
+
+
+## Metrics reported by the plugin
+This agent uses functions exposed by the runtime and runtime/debug packages to collect the most important information about the Go runtime.
+
+### General metrics
+- Runtime/General/NOGoroutines - number of running goroutines, as reported by NumGoroutine() from the runtime package
+- Runtime/General/NOCgoCalls - number of cgo calls made, as reported by NumCgoCall() from the runtime package
+
+### Garbage collector metrics
+- Runtime/GC/NumberOfGCCalls - number of GC calls, as reported by ReadGCStats() from runtime/debug
+- Runtime/GC/PauseTotalTime - total pause time during GC calls, as reported by ReadGCStats() from runtime/debug (in nanoseconds)
+- Runtime/GC/GCTime/Max - max GC time
+- Runtime/GC/GCTime/Min - min GC time
+- Runtime/GC/GCTime/Mean - mean GC time
+- Runtime/GC/GCTime/Percentile95 - 95th percentile of GC time
+
+All of these metrics are measured in nanoseconds. The last four of them can be inaccurate if GC is called more often than once per GCPollInterval.
+If GC is called more often in your workload, consider decreasing the value of GCPollInterval.
+But be careful: ReadGCStats() blocks the mheap, so it is not a good idea to set GCPollInterval to very low values.
+
+### Memory allocator
+- Component/Runtime/Memory/SysMem/Total - number of bytes/minute allocated from the OS in total.
+- Component/Runtime/Memory/SysMem/Stack - number of bytes/minute allocated from the OS for stacks.
+- Component/Runtime/Memory/SysMem/MSpan - number of bytes/minute allocated from the OS for internal MSpan structs.
+- Component/Runtime/Memory/SysMem/MCache - number of bytes/minute allocated from the OS for internal MCache structs.
+- Component/Runtime/Memory/SysMem/Heap - number of bytes/minute allocated from the OS for the heap.
+- Component/Runtime/Memory/SysMem/BuckHash - number of bytes/minute allocated from the OS for internal BuckHash structs.
+- Component/Runtime/Memory/Operations/NoFrees - number of memory frees per minute
+- Component/Runtime/Memory/Operations/NoMallocs - number of memory allocations per minute
+- Component/Runtime/Memory/Operations/NoPointerLookups - number of pointer lookups per minute
+- Component/Runtime/Memory/InUse/Total - total amount of memory in use
+- Component/Runtime/Memory/InUse/Heap - amount of memory in use for the heap
+- Component/Runtime/Memory/InUse/MCacheInuse - amount of memory in use for MCache internal structures
+- Component/Runtime/Memory/InUse/MSpanInuse - amount of memory in use for MSpan internal structures
+- Component/Runtime/Memory/InUse/Stack - amount of memory in use for stacks
+
+### Process metrics
+- Component/Runtime/System/Threads - number of OS threads used
+- Runtime/System/FDSize - number of file descriptors used by the process
+- Runtime/System/Memory/VmPeakSize - VM max size
+- Runtime/System/Memory/VmCurrent - VM current size
+- Runtime/System/Memory/RssPeak - max size of the resident memory set
+- Runtime/System/Memory/RssCurrent - current size of the resident memory set
+
+All of these metrics are collected once per MemoryAllocatorPollInterval. To collect them, the agent uses the ReadMemStats() routine.
+This routine calls stoptheworld() internally and blocks everything. So, please, consider this when you change the MemoryAllocatorPollInterval value.
+
+### HTTP metrics
+- throughput (requests per second), calculated for the last minute
+- mean throughput (requests per second)
+- mean response time
+- min response time
+- max response time
+- 75%, 90%, 95% percentiles for response time
+
+
+In order to collect HTTP metrics, handler functions must be wrapped using WrapHTTPHandlerFunc:
+
+```go
+http.HandleFunc("/", agent.WrapHTTPHandlerFunc(handler))
+```
+
+## TODO
+- Collect per-size allocation statistics
+- Collect user-defined metrics
+
diff --git a/vendor/github.com/yvasiyarov/newrelic_platform_go/README.md b/vendor/github.com/yvasiyarov/newrelic_platform_go/README.md
new file mode 100644
index 00000000..34462344
--- /dev/null
+++ b/vendor/github.com/yvasiyarov/newrelic_platform_go/README.md
@@ -0,0 +1,11 @@
+New Relic Platform Agent SDK for Go (golang)
+====================
+
+[![Build Status](https://travis-ci.org/yvasiyarov/newrelic_platform_go.png?branch=master)](https://travis-ci.org/yvasiyarov/newrelic_platform_go)
+
+This package provides a very simple interface to the NewRelic Platform: http://newrelic.com/platform
+
+For an example of usage, see examples/wave_plugin.go
+
+For a real-world example, you can have a look at:
+https://github.com/yvasiyarov/newrelic_sphinx
diff --git a/vendor/golang.org/x/crypto/README b/vendor/golang.org/x/crypto/README
new file mode 100644
index 00000000..f1e0cbf9
--- /dev/null
+++ b/vendor/golang.org/x/crypto/README
@@ -0,0 +1,3 @@
+This repository holds supplementary Go cryptography libraries.
+
+To submit changes to this repository, see http://golang.org/doc/contribute.html.
diff --git a/vendor/golang.org/x/net/README b/vendor/golang.org/x/net/README
new file mode 100644
index 00000000..6b13d8e5
--- /dev/null
+++ b/vendor/golang.org/x/net/README
@@ -0,0 +1,3 @@
+This repository holds supplementary Go networking libraries.
+
+To submit changes to this repository, see http://golang.org/doc/contribute.html.
diff --git a/vendor/golang.org/x/net/http2/README b/vendor/golang.org/x/net/http2/README
new file mode 100644
index 00000000..360d5aa3
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/README
@@ -0,0 +1,20 @@
+This is a work-in-progress HTTP/2 implementation for Go.
+
+It will eventually live in the Go standard library and won't require
+any changes to your code to use. It will just be automatic.
+
+Status:
+
+* The server support is pretty good. A few things are missing
+  but are being worked on.
+* The client work has just started; it shares a lot of code with the
+  server and is coming along much quicker.
+
+Docs are at https://godoc.org/golang.org/x/net/http2
+
+Demo test server at https://http2.golang.org/
+
+Help & bug reports welcome!
+
+Contributing: https://golang.org/doc/contribute.html
+Bugs:         https://golang.org/issue/new?title=x/net/http2:+
diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md
new file mode 100644
index 00000000..0d514173
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/README.md
@@ -0,0 +1,64 @@
+# OAuth2 for Go
+
+[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2)
+
+The oauth2 package contains a client implementation of the OAuth 2.0 spec.
+
+## Installation
+
+~~~~
+go get golang.org/x/oauth2
+~~~~
+
+See godoc for further documentation and examples.
+
+* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2)
+* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google)
+
+
+## App Engine
+
+In change 96e89be (March 2015) we removed the `oauth2.Context2` type in favor
+of the [`context.Context`](https://golang.org/x/net/context#Context) type from
+the `golang.org/x/net/context` package.
+
+This means it is no longer possible to use the "Classic App Engine"
+`appengine.Context` type with the `oauth2` package. (You're using
+Classic App Engine if you import the package `"appengine"`.)
+
+To work around this, you may use the new `"google.golang.org/appengine"`
+package. This package has almost the same API as the `"appengine"` package,
+but it can be fetched with `go get` and used on "Managed VMs" as well as
+Classic App Engine.
+
+See the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app)
+for information on updating your app.
+
+If you don't want to update your entire app to use the new App Engine packages,
+you may use both sets of packages in parallel, using only the new packages
+with the `oauth2` package.
+
+    import (
+        "golang.org/x/net/context"
+        "golang.org/x/oauth2"
+        "golang.org/x/oauth2/google"
+        newappengine "google.golang.org/appengine"
+        newurlfetch "google.golang.org/appengine/urlfetch"
+
+        "appengine"
+    )
+
+    func handler(w http.ResponseWriter, r *http.Request) {
+        var c appengine.Context = appengine.NewContext(r)
+        c.Infof("Logging a message with the old package")
+
+        var ctx context.Context = newappengine.NewContext(r)
+        client := &http.Client{
+            Transport: &oauth2.Transport{
+                Source: google.AppEngineTokenSource(ctx, "scope"),
+                Base:   &newurlfetch.Transport{Context: ctx},
+            },
+        }
+        client.Get("...")
+    }
+
diff --git a/vendor/golang.org/x/time/README b/vendor/golang.org/x/time/README
new file mode 100644
index 00000000..144e347b
--- /dev/null
+++ b/vendor/golang.org/x/time/README
@@ -0,0 +1 @@
+This repository provides supplementary Go time packages.
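+
+The most commonly used package in this repository is likely rate, which
+implements a token-bucket rate limiter. A minimal sketch of using it follows;
+the limit and burst values below are illustrative, not taken from the
+package's docs:
+
+    package main
+
+    import (
+        "context"
+        "fmt"
+
+        "golang.org/x/time/rate"
+    )
+
+    func main() {
+        // Allow an average of 10 events per second, with bursts of up to 5.
+        limiter := rate.NewLimiter(rate.Limit(10), 5)
+        for i := 0; i < 20; i++ {
+            // Wait blocks until the limiter permits another event.
+            if err := limiter.Wait(context.Background()); err != nil {
+                fmt.Println(err)
+                return
+            }
+            fmt.Println("event", i)
+        }
+    }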
diff --git a/vendor/google.golang.org/api/README.md b/vendor/google.golang.org/api/README.md
new file mode 100644
index 00000000..b562c2f4
--- /dev/null
+++ b/vendor/google.golang.org/api/README.md
@@ -0,0 +1,92 @@
+# Google APIs Client Library for Go
+
+## Status
+[![Build Status](https://travis-ci.org/google/google-api-go-client.png)](https://travis-ci.org/google/google-api-go-client)
+
+These are auto-generated Go libraries from the Google Discovery Service's JSON description files of the available "new style" Google APIs.
+
+Due to the auto-generated nature of this collection of libraries, complete APIs or specific versions can appear or go away without notice.
+As a result, you should always locally vendor any API(s) that your code relies upon.
+
+Announcement email:
+
+* http://groups.google.com/group/golang-nuts/browse_thread/thread/6c7281450be9a21e
+
+Getting started documentation:
+
+* https://github.com/google/google-api-go-client/blob/master/GettingStarted.md
+
+In summary:
+
+```
+$ go get google.golang.org/api/storage/v1
+$ go get google.golang.org/api/tasks/v1
+$ go get google.golang.org/api/moderator/v1
+... etc ...
+```
+
+For docs, see e.g.:
+
+* https://godoc.org/google.golang.org/api/storage/v1
+
+The package of a given import is the second-to-last component, before the version number.
+
+For examples, see:
+
+* https://github.com/google/google-api-go-client/tree/master/examples
+
+For support, use the golang-nuts@ mailing list:
+
+* https://groups.google.com/group/golang-nuts
+
+## Application Default Credentials Example
+
+Application Default Credentials provide a simplified way to obtain credentials
+for authenticating with Google APIs.
+
+The Application Default Credentials authenticate as the application itself,
+which make them great for working with Google Cloud APIs like Storage or
+Datastore. They are the recommended form of authentication when building
+applications that run on Google Compute Engine or Google App Engine.
+
+Default credentials are provided by the `golang.org/x/oauth2/google` package. To use them, add the following import:
+
+```
+import "golang.org/x/oauth2/google"
+```
+
+Some credential types require you to specify scopes, and service entry points may not inject them. If you encounter this situation you may need to specify scopes as follows:
+
+```
+import (
+    "golang.org/x/net/context"
+    "golang.org/x/oauth2/google"
+    "google.golang.org/api/compute/v1"
+)
+
+func main() {
+    // Use oauth2.NoContext if there isn't a good context to pass in.
+    ctx := context.Background()
+
+    client, err := google.DefaultClient(ctx, compute.ComputeScope)
+    if err != nil {
+        //...
+    }
+    computeService, err := compute.New(client)
+    if err != nil {
+        //...
+    }
+}
+```
+
+If you need an `oauth2.TokenSource`, use the `DefaultTokenSource` function:
+
+```
+ts, err := google.DefaultTokenSource(ctx, scope1, scope2, ...)
+if err != nil {
+    //...
+}
+client := oauth2.NewClient(ctx, ts)
+```
+
+See also: [golang.org/x/oauth2/google](https://godoc.org/golang.org/x/oauth2/google) package documentation.
diff --git a/vendor/google.golang.org/appengine/README.md b/vendor/google.golang.org/appengine/README.md
new file mode 100644
index 00000000..1dbb3341
--- /dev/null
+++ b/vendor/google.golang.org/appengine/README.md
@@ -0,0 +1,73 @@
+# Go App Engine packages
+
+[![Build Status](https://travis-ci.org/golang/appengine.svg)](https://travis-ci.org/golang/appengine)
+
+This repository supports the Go runtime on App Engine,
+including both classic App Engine and Managed VMs.
+It provides APIs for interacting with App Engine services.
+Its canonical import path is `google.golang.org/appengine`.
+
+See https://cloud.google.com/appengine/docs/go/
+for more information.
+
+File issue reports and feature requests on the [Google App Engine issue
+tracker](https://code.google.com/p/googleappengine/issues/entry?template=Go%20defect).
+
+## Directory structure
+The top level directory of this repository is the `appengine` package. It
+contains the
+basic APIs (e.g. `appengine.NewContext`) that apply across APIs. Specific API
+packages are in subdirectories (e.g. `datastore`).
+
+There is an `internal` subdirectory that contains service protocol buffers,
+plus packages required for connectivity to make API calls. App Engine apps
+should not directly import any package under `internal`.
+
+## Updating a Go App Engine app
+
+This section describes how to update a traditional Go App Engine app to use
+these packages.
+
+### 1. Update YAML files (Managed VMs only)
+
+The `app.yaml` file (and YAML files for modules) should have these new lines added:
+```
+vm: true
+```
+See https://cloud.google.com/appengine/docs/go/modules/#Go_Instance_scaling_and_class for details.
+
+### 2. Update import paths
+
+The import paths for App Engine packages are now fully qualified, based at `google.golang.org/appengine`.
+You will need to update your code to use import paths starting with that; for instance,
+code importing `appengine/datastore` will now need to import `google.golang.org/appengine/datastore`.
+You can do that manually, or by running this command to recursively update all Go source files in the current directory
+(may require GNU sed):
+```
+sed -i '/"appengine/{s,"appengine,"google.golang.org/appengine,;s,appengine_,appengine/,}' \
+  $(find . -name '*.go')
+```
+
+### 3. Update code using deprecated, removed or modified APIs
+
+Most App Engine services are available with exactly the same API.
+A few APIs were cleaned up, and some are not available yet.
+This list summarises the differences:
+
+* `appengine.Context` has been replaced with the `Context` type from `golang.org/x/net/context`.
+* Logging methods that were on `appengine.Context` are now functions in `google.golang.org/appengine/log` (see the sketch after this list).
+* `appengine.Timeout` has been removed. Use `context.WithTimeout` instead.
+* `appengine.Datacenter` now takes a `context.Context` argument.
+* `datastore.PropertyLoadSaver` has been simplified to use slices in place of channels.
+* `delay.Call` now returns an error.
+* `search.FieldLoadSaver` now handles document metadata.
+* `urlfetch.Transport` no longer has a Deadline field; set a deadline on the
+  `context.Context` instead.
+* `aetest` no longer declares its own Context type, and uses the standard one instead.
+* `taskqueue.QueueStats` no longer takes a maxTasks argument. That argument has been
+  deprecated and unused for a long time.
+* `appengine.BackendHostname` and `appengine.BackendInstance` were for the deprecated backends feature.
+  Use `appengine.ModuleHostname` and `appengine.ModuleName` instead.
+* Most of `appengine/file` and parts of `appengine/blobstore` are deprecated.
+  Use [Google Cloud Storage](https://godoc.org/google.golang.org/cloud/storage) instead.
+* `appengine/socket` is not required on Managed VMs. Use the standard `net` package instead.
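+
+As a sketch of what these changes look like in practice, here is the logging
+migration from the list above; the handler and log message are illustrative,
+not taken from a real app:
+
+```go
+import (
+	"net/http"
+
+	"google.golang.org/appengine"
+	"google.golang.org/appengine/log"
+)
+
+func handler(w http.ResponseWriter, r *http.Request) {
+	// Before: c := appengine.NewContext(r); c.Infof("serving %s", r.URL.Path)
+	// After: NewContext returns a context.Context, and logging moves to the log package.
+	ctx := appengine.NewContext(r)
+	log.Infof(ctx, "serving %s", r.URL.Path)
+}
+```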
diff --git a/vendor/google.golang.org/cloud/README.md b/vendor/google.golang.org/cloud/README.md
new file mode 100644
index 00000000..10d3995d
--- /dev/null
+++ b/vendor/google.golang.org/cloud/README.md
@@ -0,0 +1,135 @@
+# Google Cloud for Go
+
+[![Build Status](https://travis-ci.org/GoogleCloudPlatform/gcloud-golang.svg?branch=master)](https://travis-ci.org/GoogleCloudPlatform/gcloud-golang)
+
+**NOTE:** These packages are experimental, and may occasionally make
+backwards-incompatible changes.
+
+**NOTE:** This GitHub repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud).
+
+Go packages for Google Cloud Platform services. Supported APIs include:
+
+ * Google Cloud Datastore
+ * Google Cloud Storage
+ * Google Cloud Pub/Sub
+ * Google Cloud Container Engine
+
+``` go
+import "google.golang.org/cloud"
+```
+
+Documentation and examples are available at
+[https://godoc.org/google.golang.org/cloud](https://godoc.org/google.golang.org/cloud).
+
+## Authorization
+
+Authorization, throughout the package, is delegated to the golang.org/x/oauth2 package.
+Refer to the [godoc documentation](https://godoc.org/golang.org/x/oauth2)
+for examples on using oauth2 with the Cloud package.
+
+## Google Cloud Datastore
+
+[Google Cloud Datastore][cloud-datastore] ([docs][cloud-datastore-docs]) is a fully
+managed, schemaless database for storing non-relational data. Cloud Datastore
+automatically scales with your users and supports ACID transactions, high availability
+of reads and writes, strong consistency for reads and ancestor queries, and eventual
+consistency for all other queries.
+
+Follow the [activation instructions][cloud-datastore-activation] to use the Google
+Cloud Datastore API with your project.
+
+[https://godoc.org/google.golang.org/cloud/datastore](https://godoc.org/google.golang.org/cloud/datastore)
+
+
+```go
+type Post struct {
+	Title       string
+	Body        string `datastore:",noindex"`
+	PublishedAt time.Time
+}
+keys := []*datastore.Key{
+	datastore.NewKey(ctx, "Post", "post1", 0, nil),
+	datastore.NewKey(ctx, "Post", "post2", 0, nil),
+}
+posts := []*Post{
+	{Title: "Post 1", Body: "...", PublishedAt: time.Now()},
+	{Title: "Post 2", Body: "...", PublishedAt: time.Now()},
+}
+if _, err := datastore.PutMulti(ctx, keys, posts); err != nil {
+	log.Println(err)
+}
+```
+
+## Google Cloud Storage
+
+[Google Cloud Storage][cloud-storage] ([docs][cloud-storage-docs]) allows you to store
+data on Google infrastructure with very high reliability, performance and availability,
+and can be used to distribute large data objects to users via direct download.
+
+[https://godoc.org/google.golang.org/cloud/storage](https://godoc.org/google.golang.org/cloud/storage)
+
+
+```go
+// Read object1 from the bucket.
+rc, err := storage.NewReader(ctx, "bucket", "object1")
+if err != nil {
+	log.Fatal(err)
+}
+slurp, err := ioutil.ReadAll(rc)
+rc.Close()
+if err != nil {
+	log.Fatal(err)
+}
+```
+
+## Google Cloud Pub/Sub (Alpha)
+
+> Google Cloud Pub/Sub is in **Alpha status**. As a result, it might change in
+> backward-incompatible ways and is not recommended for production use. It is not
+> subject to any SLA or deprecation policy.
+
+[Google Cloud Pub/Sub][cloud-pubsub] ([docs][cloud-pubsub-docs]) allows you to connect
+your services with reliable, many-to-many, asynchronous messaging hosted on Google's
+infrastructure. Cloud Pub/Sub automatically scales as you need it and provides a foundation
+for building your own robust, global services.
+
+[https://godoc.org/google.golang.org/cloud/pubsub](https://godoc.org/google.golang.org/cloud/pubsub)
+
+
+```go
+// Publish "hello world" on topic1.
+msgIDs, err := pubsub.Publish(ctx, "topic1", &pubsub.Message{
+	Data: []byte("hello world"),
+})
+if err != nil {
+	log.Println(err)
+}
+// Pull messages via subscription1.
+msgs, err := pubsub.Pull(ctx, "subscription1", 1)
+if err != nil {
+	log.Println(err)
+}
+```
+
+## Contributing
+
+Contributions are welcome. Please see the
+[CONTRIBUTING](https://github.com/GoogleCloudPlatform/gcloud-golang/blob/master/CONTRIBUTING.md)
+document for details. We're using Gerrit for our code reviews. Please don't open pull
+requests against this repo; new pull requests will be automatically closed.
+
+Please note that this project is released with a Contributor Code of Conduct.
+By participating in this project you agree to abide by its terms.
+See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/gcloud-golang/blob/master/CONTRIBUTING.md#contributor-code-of-conduct)
+for more information.
+
+[cloud-datastore]: https://cloud.google.com/datastore/
+[cloud-datastore-docs]: https://cloud.google.com/datastore/docs
+[cloud-datastore-activation]: https://cloud.google.com/datastore/docs/activate
+
+[cloud-pubsub]: https://cloud.google.com/pubsub/
+[cloud-pubsub-docs]: https://cloud.google.com/pubsub/docs
+
+[cloud-storage]: https://cloud.google.com/storage/
+[cloud-storage-docs]: https://cloud.google.com/storage/docs/overview
+[cloud-storage-create-bucket]: https://cloud.google.com/storage/docs/cloud-console#_creatingbuckets
diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md
new file mode 100644
index 00000000..37b05f09
--- /dev/null
+++ b/vendor/google.golang.org/grpc/README.md
@@ -0,0 +1,32 @@
+# gRPC-Go
+
+[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc)
+
+The Go implementation of [gRPC](http://www.grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start](http://www.grpc.io/docs/) guide.
+
+Installation
+------------
+
+To install this package, you need to install Go 1.4 or above and set up your Go workspace on your computer. The simplest way to install the library is to run:
+
+```
+$ go get google.golang.org/grpc
+```
+
+Prerequisites
+-------------
+
+This requires Go 1.4 or above.
+
+Constraints
+-----------
+The grpc package should only depend on standard Go packages and a small number of exceptions. If your contribution introduces new dependencies which are NOT in the [list](http://godoc.org/google.golang.org/grpc?imports), you need a discussion with the gRPC-Go authors and consultants.
+
+Documentation
+-------------
+See [API documentation](https://godoc.org/google.golang.org/grpc) for package and API descriptions and find examples in the [examples directory](examples/).
+
+Status
+------
+Beta release
+
diff --git a/vendor/gopkg.in/check.v1/README.md b/vendor/gopkg.in/check.v1/README.md
new file mode 100644
index 00000000..0ca9e572
--- /dev/null
+++ b/vendor/gopkg.in/check.v1/README.md
@@ -0,0 +1,20 @@
+Instructions
+============
+
+Install the package with:
+
+    go get gopkg.in/check.v1
+
+Import it with:
+
+    import "gopkg.in/check.v1"
+
+and use _check_ as the package name inside the code.
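+
+A minimal test file wiring _check_ into `go test` might look like this (the
+suite and assertion below are illustrative, not part of the package's docs):
+
+    package hello_test
+
+    import (
+        "testing"
+
+        . "gopkg.in/check.v1"
+    )
+
+    // Hook gocheck up to the "go test" runner.
+    func Test(t *testing.T) { TestingT(t) }
+
+    type MySuite struct{}
+
+    var _ = Suite(&MySuite{})
+
+    func (s *MySuite) TestHello(c *C) {
+        c.Assert("hello", Equals, "hello")
+    }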
+ +For more details, visit the project page: + +* http://labix.org/gocheck + +and the API documentation: + +* https://gopkg.in/check.v1 diff --git a/vendor/gopkg.in/square/go-jose.v1/README.md b/vendor/gopkg.in/square/go-jose.v1/README.md new file mode 100644 index 00000000..fd859da7 --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v1/README.md @@ -0,0 +1,209 @@ +# Go JOSE + +[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v1) [![license](http://img.shields.io/badge/license-apache_2.0-red.svg?style=flat)](https://raw.githubusercontent.com/square/go-jose/master/LICENSE) [![build](https://travis-ci.org/square/go-jose.svg?branch=master)](https://travis-ci.org/square/go-jose) [![coverage](https://coveralls.io/repos/github/square/go-jose/badge.svg?branch=master)](https://coveralls.io/r/square/go-jose) + +Package jose aims to provide an implementation of the Javascript Object Signing +and Encryption set of standards. For the moment, it mainly focuses on encryption +and signing based on the JSON Web Encryption and JSON Web Signature standards. + +**Disclaimer**: This library contains encryption software that is subject to +the U.S. Export Administration Regulations. You may not export, re-export, +transfer or download this code or any part of it in violation of any United +States law, directive or regulation. In particular this software may not be +exported or re-exported in any form or on any media to Iran, North Sudan, +Syria, Cuba, or North Korea, or to denied persons or entities mentioned on any +US maintained blocked list. + +## Overview + +The implementation follows the +[JSON Web Encryption](http://dx.doi.org/10.17487/RFC7516) +standard (RFC 7516) and +[JSON Web Signature](http://dx.doi.org/10.17487/RFC7515) +standard (RFC 7515). Tables of supported algorithms are shown below. +The library supports both the compact and full serialization formats, and has +optional support for multiple recipients. It also comes with a small +command-line utility +([`jose-util`](https://github.com/square/go-jose/tree/master/jose-util)) +for dealing with JOSE messages in a shell. + +**Note**: We use a forked version of the `encoding/json` package from the Go +standard library which uses case-sensitive matching for member names (instead +of [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html)). +This is to avoid differences in interpretation of messages between go-jose and +libraries in other languages. If you do not like this behavior, you can use the +`std_json` build tag to disable it (though we do not recommend doing so). + +### Versions + +We use [gopkg.in](https://gopkg.in) for versioning. + +[Version 1](https://gopkg.in/square/go-jose.v1) is the current stable version: + + import "gopkg.in/square/go-jose.v1" + +The interface for [go-jose.v1](https://gopkg.in/square/go-jose.v1) will remain +backwards compatible. We're currently sketching out ideas for a new version, to +clean up the interface a bit. If you have ideas or feature requests [please let +us know](https://github.com/square/go-jose/issues/64)! + +### Supported algorithms + +See below for a table of supported algorithms. Algorithm identifiers match +the names in the +[JSON Web Algorithms](http://dx.doi.org/10.17487/RFC7518) +standard where possible. The +[Godoc reference](https://godoc.org/github.com/square/go-jose#pkg-constants) +has a list of constants. 
+
+ Key encryption             | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ RSA-PKCS#1v1.5             | RSA1_5
+ RSA-OAEP                   | RSA-OAEP, RSA-OAEP-256
+ AES key wrap               | A128KW, A192KW, A256KW
+ AES-GCM key wrap           | A128GCMKW, A192GCMKW, A256GCMKW
+ ECDH-ES + AES key wrap     | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW
+ ECDH-ES (direct)           | ECDH-ES<sup>1</sup>
+ Direct encryption          | dir<sup>1</sup>
+
+1. Not supported in multi-recipient mode
+
+ Signing / MAC              | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ RSASSA-PKCS#1v1.5          | RS256, RS384, RS512
+ RSASSA-PSS                 | PS256, PS384, PS512
+ HMAC                       | HS256, HS384, HS512
+ ECDSA                      | ES256, ES384, ES512
+
+ Content encryption         | Algorithm identifier(s)
+ :------------------------- | :------------------------------
+ AES-CBC+HMAC               | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512
+ AES-GCM                    | A128GCM, A192GCM, A256GCM
+
+ Compression                | Algorithm identifier(s)
+ :------------------------- | -------------------------------
+ DEFLATE (RFC 1951)         | DEF
+
+### Supported key types
+
+See below for a table of supported key types. These are understood by the
+library, and can be passed to corresponding functions such as `NewEncrypter` or
+`NewSigner`. Note that if you are creating a new encrypter or signer with a
+JsonWebKey, the key id of the JsonWebKey (if present) will be added to any
+resulting messages.
+
+ Algorithm(s)               | Corresponding types
+ :------------------------- | -------------------------------
+ RSA                        | *[rsa.PublicKey](http://golang.org/pkg/crypto/rsa/#PublicKey), *[rsa.PrivateKey](http://golang.org/pkg/crypto/rsa/#PrivateKey), *[jose.JsonWebKey](https://godoc.org/github.com/square/go-jose#JsonWebKey)
+ ECDH, ECDSA                | *[ecdsa.PublicKey](http://golang.org/pkg/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](http://golang.org/pkg/crypto/ecdsa/#PrivateKey), *[jose.JsonWebKey](https://godoc.org/github.com/square/go-jose#JsonWebKey)
+ AES, HMAC                  | []byte, *[jose.JsonWebKey](https://godoc.org/github.com/square/go-jose#JsonWebKey)
+
+## Examples
+
+Encryption/decryption example using RSA:
+
+```Go
+// Generate a public/private key pair to use for this example. The library
+// also provides two utility functions (LoadPublicKey and LoadPrivateKey)
+// that can be used to load keys from PEM/DER-encoded data.
+privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
+if err != nil {
+	panic(err)
+}
+
+// Instantiate an encrypter using RSA-OAEP with AES128-GCM. An error would
+// indicate that the selected algorithm(s) are not currently supported.
+publicKey := &privateKey.PublicKey
+encrypter, err := NewEncrypter(RSA_OAEP, A128GCM, publicKey)
+if err != nil {
+	panic(err)
+}
+
+// Encrypt a sample plaintext. Calling the encrypter returns an encrypted
+// JWE object, which can then be serialized for output afterwards. An error
+// would indicate a problem in an underlying cryptographic primitive.
+var plaintext = []byte("Lorem ipsum dolor sit amet")
+object, err := encrypter.Encrypt(plaintext)
+if err != nil {
+	panic(err)
+}
+
+// Serialize the encrypted object using the full serialization format.
+// Alternatively you can also use the compact format here by calling
+// object.CompactSerialize() instead.
+serialized := object.FullSerialize()
+
+// Parse the serialized, encrypted JWE object. An error would indicate that
+// the given input did not represent a valid message.
+object, err = ParseEncrypted(serialized)
+if err != nil {
+	panic(err)
+}
+
+// Now we can decrypt and get back our original plaintext. An error here
+// would indicate that the message failed to decrypt, e.g. because the auth
+// tag was broken or the message was tampered with.
+decrypted, err := object.Decrypt(privateKey)
+if err != nil {
+	panic(err)
+}
+
+fmt.Println(string(decrypted))
+// output: Lorem ipsum dolor sit amet
+```
+
+Signing/verification example using RSA:
+
+```Go
+// Generate a public/private key pair to use for this example. The library
+// also provides two utility functions (LoadPublicKey and LoadPrivateKey)
+// that can be used to load keys from PEM/DER-encoded data.
+privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
+if err != nil {
+	panic(err)
+}
+
+// Instantiate a signer using RSASSA-PSS (SHA512) with the given private key.
+signer, err := NewSigner(PS512, privateKey)
+if err != nil {
+	panic(err)
+}
+
+// Sign a sample payload. Calling the signer returns a protected JWS object,
+// which can then be serialized for output afterwards. An error would
+// indicate a problem in an underlying cryptographic primitive.
+var payload = []byte("Lorem ipsum dolor sit amet")
+object, err := signer.Sign(payload)
+if err != nil {
+	panic(err)
+}
+
+// Serialize the signed object using the full serialization format.
+// Alternatively you can also use the compact format here by calling
+// object.CompactSerialize() instead.
+serialized := object.FullSerialize()
+
+// Parse the serialized, protected JWS object. An error would indicate that
+// the given input did not represent a valid message.
+object, err = ParseSigned(serialized)
+if err != nil {
+	panic(err)
+}
+
+// Now we can verify the signature on the payload. An error here would
+// indicate that the message failed to verify, e.g. because the signature was
+// broken or the message was tampered with.
+output, err := object.Verify(&privateKey.PublicKey)
+if err != nil {
+	panic(err)
+}
+
+fmt.Println(string(output))
+// output: Lorem ipsum dolor sit amet
+```
+
+More examples can be found in the [Godoc
+reference](https://godoc.org/github.com/square/go-jose) for this package. The
+[`jose-util`](https://github.com/square/go-jose/tree/master/jose-util)
+subdirectory also contains a small command-line utility which might
+be useful as an example.
diff --git a/vendor/gopkg.in/square/go-jose.v1/json/README.md b/vendor/gopkg.in/square/go-jose.v1/json/README.md
new file mode 100644
index 00000000..86de5e55
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v1/json/README.md
@@ -0,0 +1,13 @@
+# Safe JSON
+
+This repository contains a fork of the `encoding/json` package from Go 1.6.
+
+The following changes were made:
+
+* Object deserialization uses case-sensitive member name matching instead of
+  [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html).
+  This is to avoid differences in the interpretation of JOSE messages between
+  go-jose and libraries written in other languages.
+* When deserializing a JSON object, we check for duplicate keys and reject the
+  input whenever we detect a duplicate. Rather than trying to work with malformed
+  data, we prefer to reject it right away.
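+
+A sketch of the duplicate-key behavior described above, assuming only that the
+fork keeps the standard `encoding/json` Unmarshal signature (the sample input
+is illustrative):
+
+```Go
+package main
+
+import (
+	"fmt"
+
+	"gopkg.in/square/go-jose.v1/json"
+)
+
+func main() {
+	var v map[string]interface{}
+
+	// encoding/json would silently keep the last "alg" value; this fork
+	// rejects the document because the member name is duplicated.
+	err := json.Unmarshal([]byte(`{"alg":"RS256","alg":"none"}`), &v)
+	fmt.Println(err)
+}
+```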
diff --git a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml new file mode 100644 index 00000000..8da58fbf --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml @@ -0,0 +1,31 @@ +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original copyright and license: + + apic.go + emitterc.go + parserc.go + readerc.go + scannerc.go + writerc.go + yamlh.go + yamlprivateh.go + +Copyright (c) 2006 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/gopkg.in/yaml.v2/README.md b/vendor/gopkg.in/yaml.v2/README.md new file mode 100644 index 00000000..d6c919e6 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/README.md @@ -0,0 +1,128 @@ +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. + +Compatibility +------------- + +The yaml package supports most of YAML 1.1 and 1.2, including support for +anchors, tags, map merging, etc. Multi-document unmarshalling is not yet +implemented, and base-60 floats from YAML 1.1 are purposefully not +supported since they're a poor design and are gone in YAML 1.2. + +Installation and usage +---------------------- + +The import path for the package is *gopkg.in/yaml.v2*. + +To install it, run: + + go get gopkg.in/yaml.v2 + +API documentation +----------------- + +If opened in a browser, the import path itself leads to the API documentation: + + * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) + +API stability +------------- + +The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). + + +License +------- + +The yaml package is licensed under the LGPL with an exception that allows it to be linked statically. Please see the LICENSE file for details. + + +Example +------- + +```Go +package main + +import ( + "fmt" + "log" + + "gopkg.in/yaml.v2" +) + +var data = ` +a: Easy! 
+b: + c: 2 + d: [3, 4] +` + +type T struct { + A string + B struct{C int; D []int ",flow"} +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/vendor/rsc.io/letsencrypt/README b/vendor/rsc.io/letsencrypt/README new file mode 100644 index 00000000..575ae16a --- /dev/null +++ b/vendor/rsc.io/letsencrypt/README @@ -0,0 +1,177 @@ +package letsencrypt // import "rsc.io/letsencrypt" + +Package letsencrypt obtains TLS certificates from LetsEncrypt.org. + +LetsEncrypt.org is a service that issues free SSL/TLS certificates to +servers that can prove control over the given domain's DNS records or the +servers pointed at by those records. + + +Warning + +Like any other random code you find on the internet, this package should not +be relied upon in important, production systems without thorough testing to +ensure that it meets your needs. + +In the long term you should be using +https://golang.org/x/crypto/acme/autocert instead of this package. Send +improvements there, not here. + +This is a package that I wrote for my own personal web sites (swtch.com, +rsc.io) in a hurry when my paid-for SSL certificate was expiring. It has no +tests, has barely been used, and there is some anecdotal evidence that it +does not properly renew certificates in a timely fashion, so servers that +run for more than 3 months may run into trouble. I don't run this code +anymore: to simplify maintenance, I moved the sites off of Ubuntu VMs and +onto Google App Engine, configured with inexpensive long-term certificates +purchased from cheapsslsecurity.com. + +This package was interesting primarily as an example of how simple the API +for using LetsEncrypt.org could be made, in contrast to the low-level +implementations that existed at the time. In that respect, it helped inform +the design of the golang.org/x/crypto/acme/autocert package. + + +Quick Start + +A complete HTTP/HTTPS web server using TLS certificates from +LetsEncrypt.org, redirecting all HTTP access to HTTPS, and maintaining TLS +certificates in a file letsencrypt.cache across server restarts. + + package main + + import ( + "fmt" + "log" + "net/http" + "rsc.io/letsencrypt" + ) + + func main() { + http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, "Hello, TLS!\n") + }) + var m letsencrypt.Manager + if err := m.CacheFile("letsencrypt.cache"); err != nil { + log.Fatal(err) + } + log.Fatal(m.Serve()) + } + + +Overview + +The fundamental type in this package is the Manager, which manages obtaining +and refreshing a collection of TLS certificates, typically for use by an +HTTPS server. The example above shows the most basic use of a Manager. The +use can be customized by calling additional methods of the Manager. 
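+
+For example, a server that pins its host list and registers with a contact
+email before serving might look like the sketch below; the host names and
+email address are placeholders, and the methods used are described in the
+sections that follow:
+
+    var m letsencrypt.Manager
+    m.SetHosts([]string{"example.com", "www.example.com"})
+    if err := m.CacheFile("letsencrypt.cache"); err != nil {
+        log.Fatal(err)
+    }
+    m.Register("admin@example.com", func(tosURL string) bool {
+        return true // agree to the terms of service at tosURL
+    })
+    log.Fatal(m.Serve())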
+
+
+Registration
+
+A Manager m registers anonymously with LetsEncrypt.org, including agreeing
+to the letsencrypt.org terms of service, the first time it needs to obtain a
+certificate. To register with a particular email address and with the option
+of a prompt for agreement with the terms of service, call m.Register.
+
+
+GetCertificate
+
+The Manager's GetCertificate method returns certificates from the Manager's
+cache, filling the cache by requesting certificates from LetsEncrypt.org. In
+this way, a server with a tls.Config.GetCertificate set to m.GetCertificate
+will demand load a certificate for any host name it serves. To force loading
+of certificates ahead of time, install m.GetCertificate as before but then
+call m.Cert for each host name.
+
+A Manager can only obtain a certificate for a given host name if it can
+prove control of that host name to LetsEncrypt.org. By default it proves
+control by answering an HTTPS-based challenge: when the LetsEncrypt.org
+servers connect to the named host on port 443 (HTTPS), the TLS SNI handshake
+must use m.GetCertificate to obtain a per-host certificate. The most common
+way to satisfy this requirement is for the host name to resolve to the IP
+address of a (single) computer running m.ServeHTTPS, or at least running a
+Go TLS server with tls.Config.GetCertificate set to m.GetCertificate.
+However, other configurations are possible. For example, a group of machines
+could use an implementation of tls.Config.GetCertificate that cached
+certificates but handled cache misses by making RPCs to a Manager m on an
+elected leader machine.
+
+In typical usage, then, the setting of tls.Config.GetCertificate to
+m.GetCertificate serves two purposes: it provides certificates to the TLS
+server for ordinary serving, and it also answers challenges to prove
+ownership of the domains in order to obtain those certificates.
+
+To force the loading of a certificate for a given host into the Manager's
+cache, use m.Cert.
+
+
+Persistent Storage
+
+If a server always starts with a zero Manager m, the server effectively
+fetches a new certificate for each of its host names from LetsEncrypt.org on
+each restart. This is unfortunate both because the server cannot start if
+LetsEncrypt.org is unavailable and because LetsEncrypt.org limits how often
+it will issue a certificate for a given host name (at time of writing, the
+limit is 5 per week for a given host name). To save server state proactively
+to a cache file and to reload the server state from that same file when
+creating a new manager, call m.CacheFile with the name of the file to use.
+
+For alternate storage uses, m.Marshal returns the current state of the
+Manager as an opaque string, m.Unmarshal sets the state of the Manager using
+a string previously returned by m.Marshal (usually a different m), and
+m.Watch returns a channel that receives notifications about state changes.
+
+
+Limits
+
+To avoid hitting basic rate limits on LetsEncrypt.org, a given Manager
+limits all its interactions to at most one request every minute, with an
+initial allowed burst of 20 requests.
+
+By default, if GetCertificate is asked for a certificate it does not have,
+it will in turn ask LetsEncrypt.org for that certificate. This opens a
+potential attack where attackers connect to a server by IP address and
+pretend to be asking for an incorrect host name. Then GetCertificate will
+attempt to obtain a certificate for that host, incorrectly, eventually
+hitting LetsEncrypt.org's rate limit for certificate requests and making it
+impossible to obtain actual certificates. Because servers hold certificates
+for months at a time, however, an attack would need to be sustained over a
+time period of at least a month in order to cause real problems.
+
+To mitigate this kind of attack, a given Manager limits itself to an average
+of one certificate request for a new host every three hours, with an initial
+allowed burst of up to 20 requests. Long-running servers will therefore stay
+within the LetsEncrypt.org limit of 300 failed requests per month.
+Certificate refreshes are not subject to this limit.
+
+To eliminate the attack entirely, call m.SetHosts to enumerate the exact set
+of hosts that are allowed in certificate requests.
+
+
+Web Servers
+
+The basic requirement for use of a Manager is that there be an HTTPS server
+running on port 443 and calling m.GetCertificate to obtain TLS certificates.
+Using standard primitives, the way to do this is:
+
+    srv := &http.Server{
+        Addr: ":https",
+        TLSConfig: &tls.Config{
+            GetCertificate: m.GetCertificate,
+        },
+    }
+    srv.ListenAndServeTLS("", "")
+
+However, this pattern of serving HTTPS with demand-loaded TLS certificates
+comes up enough to wrap into a single method m.ServeHTTPS.
+
+Similarly, many HTTPS servers prefer to redirect HTTP clients to the HTTPS
+URLs. That functionality is provided by RedirectHTTP.
+
+The combination of serving HTTPS with demand-loaded TLS certificates and
+serving HTTPS redirects to HTTP clients is provided by m.Serve, as used in
+the original example above.
+
+func RedirectHTTP(w http.ResponseWriter, r *http.Request)
+type Manager struct { ... }

From 1f0a9dbca0fa5dd106adc3d190575d9dc6d8ac8d Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Fri, 7 Apr 2017 15:19:53 -0700
Subject: [PATCH 024/276] registry/handlers: remove context manager

Signed-off-by: Stephen J Day
---
 registry/handlers/app.go     | 24 +++++++--------
 registry/handlers/context.go | 60 ------------------------------------
 2 files changed, 11 insertions(+), 73 deletions(-)

diff --git a/registry/handlers/app.go b/registry/handlers/app.go
index 04e346cc..5393e88d 100644
--- a/registry/handlers/app.go
+++ b/registry/handlers/app.go
@@ -596,24 +596,19 @@ func (app *App) configureSecret(configuration *configuration.Configuration) {
 func (app *App) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	defer r.Body.Close() // ensure that request body is always closed.
 
-	// Instantiate an http context here so we can track the error codes
-	// returned by the request router.
-	ctx := defaultContextManager.context(app, w, r)
+	// Prepare the context with our own little decorations.
+	ctx := r.Context()
+	ctx = ctxu.WithRequest(ctx, r)
+	ctx, w = ctxu.WithResponseWriter(ctx, w)
+	ctx = ctxu.WithLogger(ctx, ctxu.GetRequestLogger(ctx))
+	r = r.WithContext(ctx)
 
 	defer func() {
 		status, ok := ctx.Value("http.response.status").(int)
 		if ok && status >= 200 && status <= 399 {
-			ctxu.GetResponseLogger(ctx).Infof("response completed")
+			ctxu.GetResponseLogger(r.Context()).Infof("response completed")
 		}
 	}()
-	defer defaultContextManager.release(ctx)
-
-	// NOTE(stevvooe): Total hack to get instrumented responsewriter from context.
- var err error - w, err = ctxu.GetResponseWriter(ctx) - if err != nil { - ctxu.GetLogger(ctx).Warnf("response writer not found in context") - } // Set a header with the Docker Distribution API Version for all responses. w.Header().Add("Docker-Distribution-API-Version", "registry/2.0") @@ -649,6 +644,9 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { // Add username to request logging context.Context = ctxu.WithLogger(context.Context, ctxu.GetLogger(context.Context, auth.UserNameKey)) + // sync up context on the request. + r = r.WithContext(context) + if app.nameRequired(r) { nameRef, err := reference.WithName(getName(context)) if err != nil { @@ -756,7 +754,7 @@ func (app *App) logError(context context.Context, errors errcode.Errors) { // context constructs the context object for the application. This only be // called once per request. func (app *App) context(w http.ResponseWriter, r *http.Request) *Context { - ctx := defaultContextManager.context(app, w, r) + ctx := r.Context() ctx = ctxu.WithVars(ctx, r) ctx = ctxu.WithLogger(ctx, ctxu.GetLogger(ctx, "vars.name", diff --git a/registry/handlers/context.go b/registry/handlers/context.go index a49ad10e..6c1be5b3 100644 --- a/registry/handlers/context.go +++ b/registry/handlers/context.go @@ -3,7 +3,6 @@ package handlers import ( "fmt" "net/http" - "sync" "github.com/docker/distribution" ctxu "github.com/docker/distribution/context" @@ -91,62 +90,3 @@ func getUserName(ctx context.Context, r *http.Request) string { return username } - -// contextManager allows us to associate net/context.Context instances with a -// request, based on the memory identity of http.Request. This prepares http- -// level context, which is not application specific. If this is called, -// (*contextManager).release must be called on the context when the request is -// completed. -// -// Providing this circumvents a lot of necessity for dispatchers with the -// benefit of instantiating the request context much earlier. -// -// TODO(stevvooe): Consider making this facility a part of the context package. -type contextManager struct { - contexts map[*http.Request]context.Context - mu sync.Mutex -} - -// defaultContextManager is just a global instance to register request contexts. -var defaultContextManager = newContextManager() - -func newContextManager() *contextManager { - return &contextManager{ - contexts: make(map[*http.Request]context.Context), - } -} - -// context either returns a new context or looks it up in the manager. -func (cm *contextManager) context(parent context.Context, w http.ResponseWriter, r *http.Request) context.Context { - cm.mu.Lock() - defer cm.mu.Unlock() - - ctx, ok := cm.contexts[r] - if ok { - return ctx - } - - if parent == nil { - parent = ctxu.Background() - } - - ctx = ctxu.WithRequest(parent, r) - ctx, w = ctxu.WithResponseWriter(ctx, w) - ctx = ctxu.WithLogger(ctx, ctxu.GetRequestLogger(ctx)) - cm.contexts[r] = ctx - - return ctx -} - -// releases frees any associated with resources from request. 
-func (cm *contextManager) release(ctx context.Context) { - cm.mu.Lock() - defer cm.mu.Unlock() - - r, err := ctxu.GetRequest(ctx) - if err != nil { - ctxu.GetLogger(ctx).Errorf("no request found in context during release") - return - } - delete(cm.contexts, r) -} From 818ba4babfd4fa8c4c17f1462c20b9fe8e3b3697 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Fri, 7 Apr 2017 16:13:09 -0700 Subject: [PATCH 025/276] vendor: update gorilla/mux to be compatible with Go 1.7 Signed-off-by: Stephen J Day --- vendor.conf | 2 +- vendor/github.com/gorilla/mux/README.md | 341 +++++++++++++++++- .../github.com/gorilla/mux/context_gorilla.go | 26 ++ .../github.com/gorilla/mux/context_native.go | 24 ++ vendor/github.com/gorilla/mux/doc.go | 65 +++- vendor/github.com/gorilla/mux/mux.go | 257 +++++++++++-- vendor/github.com/gorilla/mux/regexp.go | 129 ++++--- vendor/github.com/gorilla/mux/route.go | 146 +++++++- 8 files changed, 881 insertions(+), 109 deletions(-) create mode 100644 vendor/github.com/gorilla/mux/context_gorilla.go create mode 100644 vendor/github.com/gorilla/mux/context_native.go diff --git a/vendor.conf b/vendor.conf index 85e5c94d..ecb56d05 100644 --- a/vendor.conf +++ b/vendor.conf @@ -13,7 +13,7 @@ github.com/go-ini/ini 2ba15ac2dc9cdf88c110ec2dc0ced7fa45f5678c github.com/golang/protobuf 8d92cf5fc15a4382f8964b08e1f42a75c0591aa3 github.com/gorilla/context 14f550f51af52180c2eefed15e5fd18d63c0a64a github.com/gorilla/handlers 60c7bfde3e33c201519a200a4507a158cc03a17b -github.com/gorilla/mux e444e69cbd2e2e3e0749a2f3c717cec491552bbf +github.com/gorilla/mux 599cba5e7b6137d46ddf58fb1765f5d928e69604 github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d github.com/miekg/dns 271c58e0c14f552178ea321a545ff9af38930f39 diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md index e60301b0..cdab8784 100644 --- a/vendor/github.com/gorilla/mux/README.md +++ b/vendor/github.com/gorilla/mux/README.md @@ -1,7 +1,340 @@ -mux +gorilla/mux === -[![Build Status](https://travis-ci.org/gorilla/mux.png?branch=master)](https://travis-ci.org/gorilla/mux) +[![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux) +[![Build Status](https://travis-ci.org/gorilla/mux.svg?branch=master)](https://travis-ci.org/gorilla/mux) +[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge) -gorilla/mux is a powerful URL router and dispatcher. +![Gorilla Logo](http://www.gorillatoolkit.org/static/images/gorilla-icon-64.png) -Read the full documentation here: http://www.gorillatoolkit.org/pkg/mux +http://www.gorillatoolkit.org/pkg/mux + +Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to +their respective handler. + +The name mux stands for "HTTP request multiplexer". Like the standard `http.ServeMux`, `mux.Router` matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are: + +* It implements the `http.Handler` interface so it is compatible with the standard `http.ServeMux`. +* Requests can be matched based on URL host, path, path prefix, schemes, header and query values, HTTP methods or using custom matchers. +* URL hosts and paths can have variables with an optional regular expression. 
+* Registered URLs can be built, or "reversed", which helps maintaining references to resources. +* Routes can be used as subrouters: nested routes are only tested if the parent route matches. This is useful to define groups of routes that share common conditions like a host, a path prefix or other repeated attributes. As a bonus, this optimizes request matching. + +--- + +* [Install](#install) +* [Examples](#examples) +* [Matching Routes](#matching-routes) +* [Listing Routes](#listing-routes) +* [Static Files](#static-files) +* [Registered URLs](#registered-urls) +* [Full Example](#full-example) + +--- + +## Install + +With a [correctly configured](https://golang.org/doc/install#testing) Go toolchain: + +```sh +go get -u github.com/gorilla/mux +``` + +## Examples + +Let's start registering a couple of URL paths and handlers: + +```go +func main() { + r := mux.NewRouter() + r.HandleFunc("/", HomeHandler) + r.HandleFunc("/products", ProductsHandler) + r.HandleFunc("/articles", ArticlesHandler) + http.Handle("/", r) +} +``` + +Here we register three routes mapping URL paths to handlers. This is equivalent to how `http.HandleFunc()` works: if an incoming request URL matches one of the paths, the corresponding handler is called passing (`http.ResponseWriter`, `*http.Request`) as parameters. + +Paths can have variables. They are defined using the format `{name}` or `{name:pattern}`. If a regular expression pattern is not defined, the matched variable will be anything until the next slash. For example: + +```go +r := mux.NewRouter() +r.HandleFunc("/products/{key}", ProductHandler) +r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) +r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) +``` + +The names are used to create a map of route variables which can be retrieved calling `mux.Vars()`: + +```go +func ArticlesCategoryHandler(w http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, "Category: %v\n", vars["category"]) +} +``` + +And this is all you need to know about the basic usage. More advanced options are explained below. + +### Matching Routes + +Routes can also be restricted to a domain or subdomain. Just define a host pattern to be matched. They can also have variables: + +```go +r := mux.NewRouter() +// Only matches if domain is "www.example.com". +r.Host("www.example.com") +// Matches a dynamic subdomain. +r.Host("{subdomain:[a-z]+}.domain.com") +``` + +There are several other matchers that can be added. To match path prefixes: + +```go +r.PathPrefix("/products/") +``` + +...or HTTP methods: + +```go +r.Methods("GET", "POST") +``` + +...or URL schemes: + +```go +r.Schemes("https") +``` + +...or header values: + +```go +r.Headers("X-Requested-With", "XMLHttpRequest") +``` + +...or query values: + +```go +r.Queries("key", "value") +``` + +...or to use a custom matcher function: + +```go +r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { + return r.ProtoMajor == 0 +}) +``` + +...and finally, it is possible to combine several matchers in a single route: + +```go +r.HandleFunc("/products", ProductsHandler). + Host("www.example.com"). + Methods("GET"). + Schemes("http") +``` + +Setting the same matching conditions again and again can be boring, so we have a way to group several routes that share the same requirements. We call it "subrouting". + +For example, let's say we have several URLs that should only match when the host is `www.example.com`. 
Create a route for that host and get a "subrouter" from it: + +```go +r := mux.NewRouter() +s := r.Host("www.example.com").Subrouter() +``` + +Then register routes in the subrouter: + +```go +s.HandleFunc("/products/", ProductsHandler) +s.HandleFunc("/products/{key}", ProductHandler) +s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) +``` + +The three URL paths we registered above will only be tested if the domain is `www.example.com`, because the subrouter is tested first. This is not only convenient, but also optimizes request matching. You can create subrouters combining any attribute matchers accepted by a route. + +Subrouters can be used to create domain or path "namespaces": you define subrouters in a central place and then parts of the app can register its paths relatively to a given subrouter. + +There's one more thing about subroutes. When a subrouter has a path prefix, the inner routes use it as base for their paths: + +```go +r := mux.NewRouter() +s := r.PathPrefix("/products").Subrouter() +// "/products/" +s.HandleFunc("/", ProductsHandler) +// "/products/{key}/" +s.HandleFunc("/{key}/", ProductHandler) +// "/products/{key}/details" +s.HandleFunc("/{key}/details", ProductDetailsHandler) +``` + +### Listing Routes + +Routes on a mux can be listed using the Router.Walk method—useful for generating documentation: + +```go +package main + +import ( + "fmt" + "net/http" + + "github.com/gorilla/mux" +) + +func handler(w http.ResponseWriter, r *http.Request) { + return +} + +func main() { + r := mux.NewRouter() + r.HandleFunc("/", handler) + r.HandleFunc("/products", handler) + r.HandleFunc("/articles", handler) + r.HandleFunc("/articles/{id}", handler) + r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error { + t, err := route.GetPathTemplate() + if err != nil { + return err + } + fmt.Println(t) + return nil + }) + http.Handle("/", r) +} +``` + +### Static Files + +Note that the path provided to `PathPrefix()` represents a "wildcard": calling +`PathPrefix("/static/").Handler(...)` means that the handler will be passed any +request that matches "/static/*". This makes it easy to serve static files with mux: + +```go +func main() { + var dir string + + flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir") + flag.Parse() + r := mux.NewRouter() + + // This will serve files under http://localhost:8000/static/ + r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir)))) + + srv := &http.Server{ + Handler: r, + Addr: "127.0.0.1:8000", + // Good practice: enforce timeouts for servers you create! + WriteTimeout: 15 * time.Second, + ReadTimeout: 15 * time.Second, + } + + log.Fatal(srv.ListenAndServe()) +} +``` + +### Registered URLs + +Now let's see how to build registered URLs. + +Routes can be named. All routes that define a name can have their URLs built, or "reversed". We define a name calling `Name()` on a route. For example: + +```go +r := mux.NewRouter() +r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). + Name("article") +``` + +To build a URL, get the route and call the `URL()` method, passing a sequence of key/value pairs for the route variables. 
For the previous route, we would do: + +```go +url, err := r.Get("article").URL("category", "technology", "id", "42") +``` + +...and the result will be a `url.URL` with the following path: + +``` +"/articles/technology/42" +``` + +This also works for host variables: + +```go +r := mux.NewRouter() +r.Host("{subdomain}.domain.com"). + Path("/articles/{category}/{id:[0-9]+}"). + HandlerFunc(ArticleHandler). + Name("article") + +// url.String() will be "http://news.domain.com/articles/technology/42" +url, err := r.Get("article").URL("subdomain", "news", + "category", "technology", + "id", "42") +``` + +All variables defined in the route are required, and their values must conform to the corresponding patterns. These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match. + +Regex support also exists for matching Headers within a route. For example, we could do: + +```go +r.HeadersRegexp("Content-Type", "application/(text|json)") +``` + +...and the route will match both requests with a Content-Type of `application/json` as well as `application/text` + +There's also a way to build only the URL host or path for a route: use the methods `URLHost()` or `URLPath()` instead. For the previous route, we would do: + +```go +// "http://news.domain.com/" +host, err := r.Get("article").URLHost("subdomain", "news") + +// "/articles/technology/42" +path, err := r.Get("article").URLPath("category", "technology", "id", "42") +``` + +And if you use subrouters, host and path defined separately can be built as well: + +```go +r := mux.NewRouter() +s := r.Host("{subdomain}.domain.com").Subrouter() +s.Path("/articles/{category}/{id:[0-9]+}"). + HandlerFunc(ArticleHandler). + Name("article") + +// "http://news.domain.com/articles/technology/42" +url, err := r.Get("article").URL("subdomain", "news", + "category", "technology", + "id", "42") +``` + +## Full Example + +Here's a complete, runnable example of a small `mux` based server: + +```go +package main + +import ( + "net/http" + "log" + "github.com/gorilla/mux" +) + +func YourHandler(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("Gorilla!\n")) +} + +func main() { + r := mux.NewRouter() + // Routes consist of a path and a handler function. + r.HandleFunc("/", YourHandler) + + // Bind to a port and pass our router in + log.Fatal(http.ListenAndServe(":8000", r)) +} +``` + +## License + +BSD licensed. See the LICENSE file for details. 
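The pair of `context_*.go` files in the hunks that follow is the heart of this vendor bump: mux now keeps one internal API (`contextGet`, `contextSet`, `contextClear`) with two build-tag-selected implementations, the gorilla/context global map for pre-1.7 Go and the request-scoped `context` package from Go 1.7 on. Below is a minimal sketch of what the go1.7 path reduces to; the key type, helper, and handler names are illustrative, not part of the patch.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"net/http"
)

// ctxKey is an unexported key type, the usual guard against key collisions
// between packages storing values on the same request context.
// (Illustrative; not a type from the vendored code.)
type ctxKey int

const userKey ctxKey = 0

// setUser mirrors contextSet's go1.7 form: derive a new *http.Request that
// carries the value, instead of writing to a process-global map.
func setUser(r *http.Request, name string) *http.Request {
	return r.WithContext(context.WithValue(r.Context(), userKey, name))
}

func handler(w http.ResponseWriter, r *http.Request) {
	// contextGet's go1.7 form: read straight off the request's own context.
	// There is no release step; the value's lifetime is the request's.
	if name, ok := r.Context().Value(userKey).(string); ok {
		fmt.Fprintf(w, "hello, %s\n", name)
	}
}

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		handler(w, setUser(r, "anonymous"))
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```

This is the same shape as the dispatcher change in the handlers patch above (`r = r.WithContext(context)`): once values travel on the request itself, the registry's contextManager bookkeeping, including its release step, has nothing left to track.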
diff --git a/vendor/github.com/gorilla/mux/context_gorilla.go b/vendor/github.com/gorilla/mux/context_gorilla.go new file mode 100644 index 00000000..d7adaa8f --- /dev/null +++ b/vendor/github.com/gorilla/mux/context_gorilla.go @@ -0,0 +1,26 @@ +// +build !go1.7 + +package mux + +import ( + "net/http" + + "github.com/gorilla/context" +) + +func contextGet(r *http.Request, key interface{}) interface{} { + return context.Get(r, key) +} + +func contextSet(r *http.Request, key, val interface{}) *http.Request { + if val == nil { + return r + } + + context.Set(r, key, val) + return r +} + +func contextClear(r *http.Request) { + context.Clear(r) +} diff --git a/vendor/github.com/gorilla/mux/context_native.go b/vendor/github.com/gorilla/mux/context_native.go new file mode 100644 index 00000000..209cbea7 --- /dev/null +++ b/vendor/github.com/gorilla/mux/context_native.go @@ -0,0 +1,24 @@ +// +build go1.7 + +package mux + +import ( + "context" + "net/http" +) + +func contextGet(r *http.Request, key interface{}) interface{} { + return r.Context().Value(key) +} + +func contextSet(r *http.Request, key, val interface{}) *http.Request { + if val == nil { + return r + } + + return r.WithContext(context.WithValue(r.Context(), key, val)) +} + +func contextClear(r *http.Request) { + return +} diff --git a/vendor/github.com/gorilla/mux/doc.go b/vendor/github.com/gorilla/mux/doc.go index b2deed34..00daf4a7 100644 --- a/vendor/github.com/gorilla/mux/doc.go +++ b/vendor/github.com/gorilla/mux/doc.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. /* -Package gorilla/mux implements a request router and dispatcher. +Package mux implements a request router and dispatcher. The name mux stands for "HTTP request multiplexer". Like the standard http.ServeMux, mux.Router matches incoming requests against a list of @@ -47,12 +47,21 @@ variable will be anything until the next slash. For example: r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) +Groups can be used inside patterns, as long as they are non-capturing (?:re). For example: + + r.HandleFunc("/articles/{category}/{sort:(?:asc|desc|new)}", ArticlesCategoryHandler) + The names are used to create a map of route variables which can be retrieved calling mux.Vars(): vars := mux.Vars(request) category := vars["category"] +Note that if any capturing groups are present, mux will panic() during parsing. To prevent +this, convert any capturing groups to non-capturing, e.g. change "/{sort:(asc|desc)}" to +"/{sort:(?:asc|desc)}". This is a change from prior versions which behaved unpredictably +when capturing groups were present. + And this is all you need to know about the basic usage. More advanced options are explained below. @@ -60,8 +69,8 @@ Routes can also be restricted to a domain or subdomain. Just define a host pattern to be matched. They can also have variables: r := mux.NewRouter() - // Only matches if domain is "www.domain.com". - r.Host("www.domain.com") + // Only matches if domain is "www.example.com". + r.Host("www.example.com") // Matches a dynamic subdomain. r.Host("{subdomain:[a-z]+}.domain.com") @@ -89,12 +98,12 @@ There are several other matchers that can be added. To match path prefixes: r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { return r.ProtoMajor == 0 - }) + }) ...and finally, it is possible to combine several matchers in a single route: r.HandleFunc("/products", ProductsHandler). - Host("www.domain.com"). + Host("www.example.com"). 
Methods("GET"). Schemes("http") @@ -103,11 +112,11 @@ a way to group several routes that share the same requirements. We call it "subrouting". For example, let's say we have several URLs that should only match when the -host is "www.domain.com". Create a route for that host and get a "subrouter" +host is "www.example.com". Create a route for that host and get a "subrouter" from it: r := mux.NewRouter() - s := r.Host("www.domain.com").Subrouter() + s := r.Host("www.example.com").Subrouter() Then register routes in the subrouter: @@ -116,7 +125,7 @@ Then register routes in the subrouter: s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) The three URL paths we registered above will only be tested if the domain is -"www.domain.com", because the subrouter is tested first. This is not +"www.example.com", because the subrouter is tested first. This is not only convenient, but also optimizes request matching. You can create subrouters combining any attribute matchers accepted by a route. @@ -136,6 +145,31 @@ the inner routes use it as base for their paths: // "/products/{key}/details" s.HandleFunc("/{key}/details", ProductDetailsHandler) +Note that the path provided to PathPrefix() represents a "wildcard": calling +PathPrefix("/static/").Handler(...) means that the handler will be passed any +request that matches "/static/*". This makes it easy to serve static files with mux: + + func main() { + var dir string + + flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir") + flag.Parse() + r := mux.NewRouter() + + // This will serve files under http://localhost:8000/static/ + r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir)))) + + srv := &http.Server{ + Handler: r, + Addr: "127.0.0.1:8000", + // Good practice: enforce timeouts for servers you create! + WriteTimeout: 15 * time.Second, + ReadTimeout: 15 * time.Second, + } + + log.Fatal(srv.ListenAndServe()) + } + Now let's see how to build registered URLs. Routes can be named. All routes that define a name can have their URLs built, @@ -164,14 +198,21 @@ This also works for host variables: // url.String() will be "http://news.domain.com/articles/technology/42" url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") + "category", "technology", + "id", "42") All variables defined in the route are required, and their values must conform to the corresponding patterns. These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match. +Regex support also exists for matching Headers within a route. For example, we could do: + + r.HeadersRegexp("Content-Type", "application/(text|json)") + +...and the route will match both requests with a Content-Type of `application/json` as well as +`application/text` + There's also a way to build only the URL host or path for a route: use the methods URLHost() or URLPath() instead. 
For the previous route, we would do: @@ -193,7 +234,7 @@ as well: // "http://news.domain.com/articles/technology/42" url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") + "category", "technology", + "id", "42") */ package mux diff --git a/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/gorilla/mux/mux.go index 5b5f8e7d..d66ec384 100644 --- a/vendor/github.com/gorilla/mux/mux.go +++ b/vendor/github.com/gorilla/mux/mux.go @@ -5,11 +5,12 @@ package mux import ( + "errors" "fmt" "net/http" "path" - - "github.com/gorilla/context" + "regexp" + "strings" ) // NewRouter returns a new router instance. @@ -46,8 +47,14 @@ type Router struct { namedRoutes map[string]*Route // See Router.StrictSlash(). This defines the flag for new routes. strictSlash bool - // If true, do not clear the request context after handling the request + // See Router.SkipClean(). This defines the flag for new routes. + skipClean bool + // If true, do not clear the request context after handling the request. + // This has no effect when go1.7+ is used, since the context is stored + // on the request itself. KeepContext bool + // see Router.UseEncodedPath(). This defines a flag for all routes. + useEncodedPath bool } // Match matches registered routes against the request. @@ -57,6 +64,12 @@ func (r *Router) Match(req *http.Request, match *RouteMatch) bool { return true } } + + // Closest match for a router (includes sub-routers) + if r.NotFoundHandler != nil { + match.Handler = r.NotFoundHandler + return true + } return false } @@ -65,35 +78,38 @@ func (r *Router) Match(req *http.Request, match *RouteMatch) bool { // When there is a match, the route variables can be retrieved calling // mux.Vars(request). func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { - // Clean path to canonical form and redirect. - if p := cleanPath(req.URL.Path); p != req.URL.Path { + if !r.skipClean { + path := req.URL.Path + if r.useEncodedPath { + path = getPath(req) + } + // Clean path to canonical form and redirect. + if p := cleanPath(path); p != path { - // Added 3 lines (Philip Schlump) - It was droping the query string and #whatever from query. - // This matches with fix in go 1.2 r.c. 4 for same problem. Go Issue: - // http://code.google.com/p/go/issues/detail?id=5252 - url := *req.URL - url.Path = p - p = url.String() + // Added 3 lines (Philip Schlump) - It was dropping the query string and #whatever from query. + // This matches with fix in go 1.2 r.c. 4 for same problem. Go Issue: + // http://code.google.com/p/go/issues/detail?id=5252 + url := *req.URL + url.Path = p + p = url.String() - w.Header().Set("Location", p) - w.WriteHeader(http.StatusMovedPermanently) - return + w.Header().Set("Location", p) + w.WriteHeader(http.StatusMovedPermanently) + return + } } var match RouteMatch var handler http.Handler if r.Match(req, &match) { handler = match.Handler - setVars(req, match.Vars) - setCurrentRoute(req, match.Route) + req = setVars(req, match.Vars) + req = setCurrentRoute(req, match.Route) } if handler == nil { - handler = r.NotFoundHandler - if handler == nil { - handler = http.NotFoundHandler() - } + handler = http.NotFoundHandler() } if !r.KeepContext { - defer context.Clear(req) + defer contextClear(req) } handler.ServeHTTP(w, req) } @@ -128,6 +144,34 @@ func (r *Router) StrictSlash(value bool) *Router { return r } +// SkipClean defines the path cleaning behaviour for new routes. The initial +// value is false. 
Users should be careful about which routes are not cleaned +// +// When true, if the route path is "/path//to", it will remain with the double +// slash. This is helpful if you have a route like: /fetch/http://xkcd.com/534/ +// +// When false, the path will be cleaned, so /fetch/http://xkcd.com/534/ will +// become /fetch/http/xkcd.com/534 +func (r *Router) SkipClean(value bool) *Router { + r.skipClean = value + return r +} + +// UseEncodedPath tells the router to match the encoded original path +// to the routes. +// For eg. "/path/foo%2Fbar/to" will match the path "/path/{var}/to". +// This behavior has the drawback of needing to match routes against +// r.RequestURI instead of r.URL.Path. Any modifications (such as http.StripPrefix) +// to r.URL.Path will not affect routing when this flag is on and thus may +// induce unintended behavior. +// +// If not called, the router will match the unencoded path to the routes. +// For eg. "/path/foo%2Fbar/to" will match the path "/path/foo/bar/to" +func (r *Router) UseEncodedPath() *Router { + r.useEncodedPath = true + return r +} + // ---------------------------------------------------------------------------- // parentRoute // ---------------------------------------------------------------------------- @@ -152,13 +196,20 @@ func (r *Router) getRegexpGroup() *routeRegexpGroup { return nil } +func (r *Router) buildVars(m map[string]string) map[string]string { + if r.parent != nil { + m = r.parent.buildVars(m) + } + return m +} + // ---------------------------------------------------------------------------- // Route factories // ---------------------------------------------------------------------------- // NewRoute registers an empty route. func (r *Router) NewRoute() *Route { - route := &Route{parent: r, strictSlash: r.strictSlash} + route := &Route{parent: r, strictSlash: r.strictSlash, skipClean: r.skipClean, useEncodedPath: r.useEncodedPath} r.routes = append(r.routes, route) return route } @@ -224,6 +275,61 @@ func (r *Router) Schemes(schemes ...string) *Route { return r.NewRoute().Schemes(schemes...) } +// BuildVarsFunc registers a new route with a custom function for modifying +// route variables before building a URL. +func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route { + return r.NewRoute().BuildVarsFunc(f) +} + +// Walk walks the router and all its sub-routers, calling walkFn for each route +// in the tree. The routes are walked in the order they were added. Sub-routers +// are explored depth-first. +func (r *Router) Walk(walkFn WalkFunc) error { + return r.walk(walkFn, []*Route{}) +} + +// SkipRouter is used as a return value from WalkFuncs to indicate that the +// router that walk is about to descend down to should be skipped. +var SkipRouter = errors.New("skip this router") + +// WalkFunc is the type of the function called for each route visited by Walk. +// At every invocation, it is given the current route, and the current router, +// and a list of ancestor routes that lead to the current route. 
+type WalkFunc func(route *Route, router *Router, ancestors []*Route) error + +func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error { + for _, t := range r.routes { + if t.regexp == nil || t.regexp.path == nil || t.regexp.path.template == "" { + continue + } + + err := walkFn(t, r, ancestors) + if err == SkipRouter { + continue + } + if err != nil { + return err + } + for _, sr := range t.matchers { + if h, ok := sr.(*Router); ok { + err := h.walk(walkFn, ancestors) + if err != nil { + return err + } + } + } + if h, ok := t.handler.(*Router); ok { + ancestors = append(ancestors, t) + err := h.walk(walkFn, ancestors) + if err != nil { + return err + } + ancestors = ancestors[:len(ancestors)-1] + } + } + return nil +} + // ---------------------------------------------------------------------------- // Context // ---------------------------------------------------------------------------- @@ -244,32 +350,58 @@ const ( // Vars returns the route variables for the current request, if any. func Vars(r *http.Request) map[string]string { - if rv := context.Get(r, varsKey); rv != nil { + if rv := contextGet(r, varsKey); rv != nil { return rv.(map[string]string) } return nil } // CurrentRoute returns the matched route for the current request, if any. +// This only works when called inside the handler of the matched route +// because the matched route is stored in the request context which is cleared +// after the handler returns, unless the KeepContext option is set on the +// Router. func CurrentRoute(r *http.Request) *Route { - if rv := context.Get(r, routeKey); rv != nil { + if rv := contextGet(r, routeKey); rv != nil { return rv.(*Route) } return nil } -func setVars(r *http.Request, val interface{}) { - context.Set(r, varsKey, val) +func setVars(r *http.Request, val interface{}) *http.Request { + return contextSet(r, varsKey, val) } -func setCurrentRoute(r *http.Request, val interface{}) { - context.Set(r, routeKey, val) +func setCurrentRoute(r *http.Request, val interface{}) *http.Request { + return contextSet(r, routeKey, val) } // ---------------------------------------------------------------------------- // Helpers // ---------------------------------------------------------------------------- +// getPath returns the escaped path if possible; doing what URL.EscapedPath() +// which was added in go1.5 does +func getPath(req *http.Request) string { + if req.RequestURI != "" { + // Extract the path from RequestURI (which is escaped unlike URL.Path) + // as detailed here as detailed in https://golang.org/pkg/net/url/#URL + // for < 1.5 server side workaround + // http://localhost/path/here?v=1 -> /path/here + path := req.RequestURI + path = strings.TrimPrefix(path, req.URL.Scheme+`://`) + path = strings.TrimPrefix(path, req.URL.Host) + if i := strings.LastIndex(path, "?"); i > -1 { + path = path[:i] + } + if i := strings.LastIndex(path, "#"); i > -1 { + path = path[:i] + } + return path + } + return req.URL.Path +} + // cleanPath returns the canonical path for p, eliminating . and .. elements. // Borrowed from the net/http package. func cleanPath(p string) string { @@ -285,6 +417,7 @@ func cleanPath(p string) string { if p[len(p)-1] == '/' && np != "/" { np += "/" } + return np } @@ -300,13 +433,24 @@ func uniqueVars(s1, s2 []string) error { return nil } -// mapFromPairs converts variadic string parameters to a string map. 
-func mapFromPairs(pairs ...string) (map[string]string, error) { +// checkPairs returns the count of strings passed in, and an error if +// the count is not an even number. +func checkPairs(pairs ...string) (int, error) { length := len(pairs) if length%2 != 0 { - return nil, fmt.Errorf( + return length, fmt.Errorf( "mux: number of parameters must be multiple of 2, got %v", pairs) } + return length, nil +} + +// mapFromPairsToString converts variadic string parameters to a +// string to string map. +func mapFromPairsToString(pairs ...string) (map[string]string, error) { + length, err := checkPairs(pairs...) + if err != nil { + return nil, err + } m := make(map[string]string, length/2) for i := 0; i < length; i += 2 { m[pairs[i]] = pairs[i+1] @@ -314,6 +458,24 @@ func mapFromPairs(pairs ...string) (map[string]string, error) { return m, nil } +// mapFromPairsToRegex converts variadic string paramers to a +// string to regex map. +func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) { + length, err := checkPairs(pairs...) + if err != nil { + return nil, err + } + m := make(map[string]*regexp.Regexp, length/2) + for i := 0; i < length; i += 2 { + regex, err := regexp.Compile(pairs[i+1]) + if err != nil { + return nil, err + } + m[pairs[i]] = regex + } + return m, nil +} + // matchInArray returns true if the given string value is in the array. func matchInArray(arr []string, value string) bool { for _, v := range arr { @@ -324,9 +486,8 @@ func matchInArray(arr []string, value string) bool { return false } -// matchMap returns true if the given key/value pairs exist in a given map. -func matchMap(toCheck map[string]string, toMatch map[string][]string, - canonicalKey bool) bool { +// matchMapWithString returns true if the given key/value pairs exist in a given map. +func matchMapWithString(toCheck map[string]string, toMatch map[string][]string, canonicalKey bool) bool { for k, v := range toCheck { // Check if key exists. if canonicalKey { @@ -351,3 +512,31 @@ func matchMap(toCheck map[string]string, toMatch map[string][]string, } return true } + +// matchMapWithRegex returns true if the given key/value pairs exist in a given map compiled against +// the given regex +func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]string, canonicalKey bool) bool { + for k, v := range toCheck { + // Check if key exists. + if canonicalKey { + k = http.CanonicalHeaderKey(k) + } + if values := toMatch[k]; values == nil { + return false + } else if v != nil { + // If value was defined as an empty string we only check that the + // key exists. Otherwise we also check for equality. + valueExists := false + for _, value := range values { + if v.MatchString(value) { + valueExists = true + break + } + } + if !valueExists { + return false + } + } + } + return true +} diff --git a/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/gorilla/mux/regexp.go index a6305483..0189ad34 100644 --- a/vendor/github.com/gorilla/mux/regexp.go +++ b/vendor/github.com/gorilla/mux/regexp.go @@ -10,6 +10,7 @@ import ( "net/http" "net/url" "regexp" + "strconv" "strings" ) @@ -23,7 +24,7 @@ import ( // Previously we accepted only Python-like identifiers for variable // names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that // name and pattern can't be empty, and names can't contain a colon. 
-func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash bool) (*routeRegexp, error) { +func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash, useEncodedPath bool) (*routeRegexp, error) { // Check if it is well-formed. idxs, errBraces := braceIndices(tpl) if errBraces != nil { @@ -34,8 +35,7 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash // Now let's parse it. defaultPattern := "[^/]+" if matchQuery { - defaultPattern = "[^?&]+" - matchPrefix = true + defaultPattern = "[^?&]*" } else if matchHost { defaultPattern = "[^.]+" matchPrefix = false @@ -53,9 +53,7 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash varsN := make([]string, len(idxs)/2) varsR := make([]*regexp.Regexp, len(idxs)/2) pattern := bytes.NewBufferString("") - if !matchQuery { - pattern.WriteByte('^') - } + pattern.WriteByte('^') reverse := bytes.NewBufferString("") var end int var err error @@ -75,9 +73,11 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash tpl[idxs[i]:end]) } // Build the regexp pattern. - fmt.Fprintf(pattern, "%s(%s)", regexp.QuoteMeta(raw), patt) + fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(i/2), patt) + // Build the reverse template. fmt.Fprintf(reverse, "%s%%s", raw) + // Append variable name and compiled pattern. varsN[i/2] = name varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt)) @@ -91,6 +91,12 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash if strictSlash { pattern.WriteString("[/]?") } + if matchQuery { + // Add the default pattern if the query value is empty + if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" { + pattern.WriteString(defaultPattern) + } + } if !matchPrefix { pattern.WriteByte('$') } @@ -103,16 +109,24 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash if errCompile != nil { return nil, errCompile } + + // Check for capturing groups which used to work in older versions + if reg.NumSubexp() != len(idxs)/2 { + panic(fmt.Sprintf("route %s contains capture groups in its regexp. ", template) + + "Only non-capturing groups are accepted: e.g. (?:pattern) instead of (pattern)") + } + // Done! return &routeRegexp{ - template: template, - matchHost: matchHost, - matchQuery: matchQuery, - strictSlash: strictSlash, - regexp: reg, - reverse: reverse.String(), - varsN: varsN, - varsR: varsR, + template: template, + matchHost: matchHost, + matchQuery: matchQuery, + strictSlash: strictSlash, + useEncodedPath: useEncodedPath, + regexp: reg, + reverse: reverse.String(), + varsN: varsN, + varsR: varsR, }, nil } @@ -127,6 +141,9 @@ type routeRegexp struct { matchQuery bool // The strictSlash value defined on the route, but disabled if PathPrefix was used. strictSlash bool + // Determines whether to use encoded path from getPath function or unencoded + // req.URL.Path for path matching + useEncodedPath bool // Expanded regexp. regexp *regexp.Regexp // Reverse template. 
@@ -141,20 +158,20 @@ type routeRegexp struct { func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { if !r.matchHost { if r.matchQuery { - return r.regexp.MatchString(req.URL.RawQuery) - } else { - return r.regexp.MatchString(req.URL.Path) + return r.matchQueryString(req) } + path := req.URL.Path + if r.useEncodedPath { + path = getPath(req) + } + return r.regexp.MatchString(path) } + return r.regexp.MatchString(getHost(req)) } // url builds a URL part using the given values. -func (r *routeRegexp) url(pairs ...string) (string, error) { - values, err := mapFromPairs(pairs...) - if err != nil { - return "", err - } +func (r *routeRegexp) url(values map[string]string) (string, error) { urlValues := make([]interface{}, len(r.varsN)) for k, v := range r.varsN { value, ok := values[v] @@ -179,11 +196,31 @@ func (r *routeRegexp) url(pairs ...string) (string, error) { return rv, nil } +// getURLQuery returns a single query parameter from a request URL. +// For a URL with foo=bar&baz=ding, we return only the relevant key +// value pair for the routeRegexp. +func (r *routeRegexp) getURLQuery(req *http.Request) string { + if !r.matchQuery { + return "" + } + templateKey := strings.SplitN(r.template, "=", 2)[0] + for key, vals := range req.URL.Query() { + if key == templateKey && len(vals) > 0 { + return key + "=" + vals[0] + } + } + return "" +} + +func (r *routeRegexp) matchQueryString(req *http.Request) bool { + return r.regexp.MatchString(r.getURLQuery(req)) +} + // braceIndices returns the first level curly brace indices from a string. // It returns an error in case of unbalanced braces. func braceIndices(s string) ([]int, error) { var level, idx int - idxs := make([]int, 0) + var idxs []int for i := 0; i < len(s); i++ { switch s[i] { case '{': @@ -204,6 +241,11 @@ func braceIndices(s string) ([]int, error) { return idxs, nil } +// varGroupName builds a capturing group name for the indexed variable. +func varGroupName(idx int) string { + return "v" + strconv.Itoa(idx) +} + // ---------------------------------------------------------------------------- // routeRegexpGroup // ---------------------------------------------------------------------------- @@ -219,23 +261,24 @@ type routeRegexpGroup struct { func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) { // Store host variables. if v.host != nil { - hostVars := v.host.regexp.FindStringSubmatch(getHost(req)) - if hostVars != nil { - for k, v := range v.host.varsN { - m.Vars[v] = hostVars[k+1] - } + host := getHost(req) + matches := v.host.regexp.FindStringSubmatchIndex(host) + if len(matches) > 0 { + extractVars(host, matches, v.host.varsN, m.Vars) } } + path := req.URL.Path + if r.useEncodedPath { + path = getPath(req) + } // Store path variables. if v.path != nil { - pathVars := v.path.regexp.FindStringSubmatch(req.URL.Path) - if pathVars != nil { - for k, v := range v.path.varsN { - m.Vars[v] = pathVars[k+1] - } + matches := v.path.regexp.FindStringSubmatchIndex(path) + if len(matches) > 0 { + extractVars(path, matches, v.path.varsN, m.Vars) // Check if we should redirect. if v.path.strictSlash { - p1 := strings.HasSuffix(req.URL.Path, "/") + p1 := strings.HasSuffix(path, "/") p2 := strings.HasSuffix(v.path.template, "/") if p1 != p2 { u, _ := url.Parse(req.URL.String()) @@ -250,13 +293,11 @@ func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) } } // Store query string variables. 
- rawQuery := req.URL.RawQuery for _, q := range v.queries { - queryVars := q.regexp.FindStringSubmatch(rawQuery) - if queryVars != nil { - for k, v := range q.varsN { - m.Vars[v] = queryVars[k+1] - } + queryURL := q.getURLQuery(req) + matches := q.regexp.FindStringSubmatchIndex(queryURL) + if len(matches) > 0 { + extractVars(queryURL, matches, q.varsN, m.Vars) } } } @@ -274,3 +315,9 @@ func getHost(r *http.Request) string { return host } + +func extractVars(input string, matches []int, names []string, output map[string]string) { + for i, name := range names { + output[name] = input[matches[2*i+2]:matches[2*i+3]] + } +} diff --git a/vendor/github.com/gorilla/mux/route.go b/vendor/github.com/gorilla/mux/route.go index c310e66b..5544c1fd 100644 --- a/vendor/github.com/gorilla/mux/route.go +++ b/vendor/github.com/gorilla/mux/route.go @@ -9,6 +9,7 @@ import ( "fmt" "net/http" "net/url" + "regexp" "strings" ) @@ -25,12 +26,23 @@ type Route struct { // If true, when the path pattern is "/path/", accessing "/path" will // redirect to the former and vice versa. strictSlash bool + // If true, when the path pattern is "/path//to", accessing "/path//to" + // will not redirect + skipClean bool + // If true, "/path/foo%2Fbar/to" will match the path "/path/{var}/to" + useEncodedPath bool // If true, this route never matches: it is only used to build URLs. buildOnly bool // The name used to build URLs. name string // Error resulted from building a route. err error + + buildVarsFunc BuildVarsFunc +} + +func (r *Route) SkipClean() bool { + return r.skipClean } // Match matches the route against the request. @@ -141,14 +153,14 @@ func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery } r.regexp = r.getRegexpGroup() if !matchHost && !matchQuery { - if len(tpl) == 0 || tpl[0] != '/' { + if len(tpl) > 0 && tpl[0] != '/' { return fmt.Errorf("mux: path must start with a slash, got %q", tpl) } if r.regexp.path != nil { tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl } } - rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, matchQuery, r.strictSlash) + rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, matchQuery, r.strictSlash, r.useEncodedPath) if err != nil { return err } @@ -186,7 +198,7 @@ func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery type headerMatcher map[string]string func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchMap(m, r.Header, true) + return matchMapWithString(m, r.Header, true) } // Headers adds a matcher for request header values. @@ -197,22 +209,46 @@ func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool { // "X-Requested-With", "XMLHttpRequest") // // The above route will only match if both request header values match. -// -// It the value is an empty string, it will match any value if the key is set. +// If the value is an empty string, it will match any value if the key is set. func (r *Route) Headers(pairs ...string) *Route { if r.err == nil { var headers map[string]string - headers, r.err = mapFromPairs(pairs...) + headers, r.err = mapFromPairsToString(pairs...) 
return r.addMatcher(headerMatcher(headers)) } return r } +// headerRegexMatcher matches the request against the route given a regex for the header +type headerRegexMatcher map[string]*regexp.Regexp + +func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool { + return matchMapWithRegex(m, r.Header, true) +} + +// HeadersRegexp accepts a sequence of key/value pairs, where the value has regex +// support. For example: +// +// r := mux.NewRouter() +// r.HeadersRegexp("Content-Type", "application/(text|json)", +// "X-Requested-With", "XMLHttpRequest") +// +// The above route will only match if both the request header matches both regular expressions. +// It the value is an empty string, it will match any value if the key is set. +func (r *Route) HeadersRegexp(pairs ...string) *Route { + if r.err == nil { + var headers map[string]*regexp.Regexp + headers, r.err = mapFromPairsToRegex(pairs...) + return r.addMatcher(headerRegexMatcher(headers)) + } + return r +} + // Host ----------------------------------------------------------------------- // Host adds a matcher for the URL host. // It accepts a template with zero or more URL variables enclosed by {}. -// Variables can define an optional regexp pattern to me matched: +// Variables can define an optional regexp pattern to be matched: // // - {name} matches anything until the next dot. // @@ -221,7 +257,7 @@ func (r *Route) Headers(pairs ...string) *Route { // For example: // // r := mux.NewRouter() -// r.Host("www.domain.com") +// r.Host("www.example.com") // r.Host("{subdomain}.domain.com") // r.Host("{subdomain:[a-z]+}.domain.com") // @@ -237,6 +273,7 @@ func (r *Route) Host(tpl string) *Route { // MatcherFunc is the function signature used by custom matchers. type MatcherFunc func(*http.Request, *RouteMatch) bool +// Match returns the match for a given request. func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool { return m(r, match) } @@ -270,7 +307,7 @@ func (r *Route) Methods(methods ...string) *Route { // Path adds a matcher for the URL path. // It accepts a template with zero or more URL variables enclosed by {}. The // template must start with a "/". -// Variables can define an optional regexp pattern to me matched: +// Variables can define an optional regexp pattern to be matched: // // - {name} matches anything until the next slash. // @@ -321,7 +358,7 @@ func (r *Route) PathPrefix(tpl string) *Route { // // It the value is an empty string, it will match any value if the key is set. // -// Variables can define an optional regexp pattern to me matched: +// Variables can define an optional regexp pattern to be matched: // // - {name} matches anything until the next slash. // @@ -334,7 +371,7 @@ func (r *Route) Queries(pairs ...string) *Route { return nil } for i := 0; i < length; i += 2 { - if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], false, true, true); r.err != nil { + if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], false, false, true); r.err != nil { return r } } @@ -360,6 +397,19 @@ func (r *Route) Schemes(schemes ...string) *Route { return r.addMatcher(schemeMatcher(schemes)) } +// BuildVarsFunc -------------------------------------------------------------- + +// BuildVarsFunc is the function signature used by custom build variable +// functions (which can modify route variables before a route's URL is built). 
+type BuildVarsFunc func(map[string]string) map[string]string + +// BuildVarsFunc adds a custom function to be used to modify build variables +// before a route's URL is built. +func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route { + r.buildVarsFunc = f + return r +} + // Subrouter ------------------------------------------------------------------ // Subrouter creates a subrouter for the route. @@ -367,7 +417,7 @@ func (r *Route) Schemes(schemes ...string) *Route { // It will test the inner routes only if the parent route matched. For example: // // r := mux.NewRouter() -// s := r.Host("www.domain.com").Subrouter() +// s := r.Host("www.example.com").Subrouter() // s.HandleFunc("/products/", ProductsHandler) // s.HandleFunc("/products/{key}", ProductHandler) // s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) @@ -422,17 +472,20 @@ func (r *Route) URL(pairs ...string) (*url.URL, error) { if r.regexp == nil { return nil, errors.New("mux: route doesn't have a host or path") } + values, err := r.prepareVars(pairs...) + if err != nil { + return nil, err + } var scheme, host, path string - var err error if r.regexp.host != nil { // Set a default scheme. scheme = "http" - if host, err = r.regexp.host.url(pairs...); err != nil { + if host, err = r.regexp.host.url(values); err != nil { return nil, err } } if r.regexp.path != nil { - if path, err = r.regexp.path.url(pairs...); err != nil { + if path, err = r.regexp.path.url(values); err != nil { return nil, err } } @@ -453,7 +506,11 @@ func (r *Route) URLHost(pairs ...string) (*url.URL, error) { if r.regexp == nil || r.regexp.host == nil { return nil, errors.New("mux: route doesn't have a host") } - host, err := r.regexp.host.url(pairs...) + values, err := r.prepareVars(pairs...) + if err != nil { + return nil, err + } + host, err := r.regexp.host.url(values) if err != nil { return nil, err } @@ -473,7 +530,11 @@ func (r *Route) URLPath(pairs ...string) (*url.URL, error) { if r.regexp == nil || r.regexp.path == nil { return nil, errors.New("mux: route doesn't have a path") } - path, err := r.regexp.path.url(pairs...) + values, err := r.prepareVars(pairs...) + if err != nil { + return nil, err + } + path, err := r.regexp.path.url(values) if err != nil { return nil, err } @@ -482,6 +543,56 @@ func (r *Route) URLPath(pairs ...string) (*url.URL, error) { }, nil } +// GetPathTemplate returns the template used to build the +// route match. +// This is useful for building simple REST API documentation and for instrumentation +// against third-party services. +// An error will be returned if the route does not define a path. +func (r *Route) GetPathTemplate() (string, error) { + if r.err != nil { + return "", r.err + } + if r.regexp == nil || r.regexp.path == nil { + return "", errors.New("mux: route doesn't have a path") + } + return r.regexp.path.template, nil +} + +// GetHostTemplate returns the template used to build the +// route match. +// This is useful for building simple REST API documentation and for instrumentation +// against third-party services. +// An error will be returned if the route does not define a host. +func (r *Route) GetHostTemplate() (string, error) { + if r.err != nil { + return "", r.err + } + if r.regexp == nil || r.regexp.host == nil { + return "", errors.New("mux: route doesn't have a host") + } + return r.regexp.host.template, nil +} + +// prepareVars converts the route variable pairs into a map. If the route has a +// BuildVarsFunc, it is invoked. 
+func (r *Route) prepareVars(pairs ...string) (map[string]string, error) { + m, err := mapFromPairsToString(pairs...) + if err != nil { + return nil, err + } + return r.buildVars(m), nil +} + +func (r *Route) buildVars(m map[string]string) map[string]string { + if r.parent != nil { + m = r.parent.buildVars(m) + } + if r.buildVarsFunc != nil { + m = r.buildVarsFunc(m) + } + return m +} + // ---------------------------------------------------------------------------- // parentRoute // ---------------------------------------------------------------------------- @@ -490,6 +601,7 @@ func (r *Route) URLPath(pairs ...string) (*url.URL, error) { type parentRoute interface { getNamedRoutes() map[string]*Route getRegexpGroup() *routeRegexpGroup + buildVars(map[string]string) map[string]string } // getNamedRoutes returns the map where named routes are registered. From 2f728896a0fbd29284970aa8befc6e8f06ffa9ea Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Wed, 12 Apr 2017 13:16:15 -0700 Subject: [PATCH 026/276] [CI] Fetch origin before diffing It turns out that origin/master may not be up to date in CircleCI checkouts. Fetch origin so that diffing for the vendor check can be done correctly. Note that this doesn't help the case of PRs against branches other than master, but the worst case is that those will waste a few seconds doing unnecessary vendor validation. Since those PRs are relatively rare, that seems fine. Signed-off-by: Aaron Lehmann --- circle.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/circle.yml b/circle.yml index b49d64ae..99a639c6 100644 --- a/circle.yml +++ b/circle.yml @@ -50,6 +50,8 @@ test: - gvm use stable && go version # Ensure validation of dependencies + - git fetch origin: + pwd: $BASE_STABLE - gvm use stable && if test -n "`git diff --stat=1000 origin/master | grep -E \"^[[:space:]]*vendor\"`"; then make dep-validate; fi: pwd: $BASE_STABLE From ac05d143d8d96c56b6b9855bc4f3831a358a0974 Mon Sep 17 00:00:00 2001 From: "Yu Wang (UC)" Date: Thu, 13 Apr 2017 18:05:38 -0700 Subject: [PATCH 027/276] closes #2224: re-vendor the latest Azure Storage SDK for better performance Signed-off-by: Yu Wang --- registry/storage/driver/azure/azure.go | 10 +- vendor.conf | 4 +- .../Azure/azure-sdk-for-go/README.md | 136 +-- .../Azure/azure-sdk-for-go/storage/README.md | 5 - .../azure-sdk-for-go/storage/authorization.go | 223 ++++ .../Azure/azure-sdk-for-go/storage/blob.go | 656 ++--------- .../storage/blobserviceclient.go | 92 ++ .../Azure/azure-sdk-for-go/storage/client.go | 313 ++--- .../azure-sdk-for-go/storage/container.go | 376 ++++++ .../azure-sdk-for-go/storage/directory.go | 217 ++++ .../Azure/azure-sdk-for-go/storage/file.go | 1020 +++++------------ .../storage/fileserviceclient.go | 375 ++++++ .../Azure/azure-sdk-for-go/storage/queue.go | 45 +- .../storage/queueserviceclient.go | 20 + .../Azure/azure-sdk-for-go/storage/share.go | 186 +++ .../azure-sdk-for-go/storage/storagepolicy.go | 47 + .../storage/storageservice.go | 118 ++ .../Azure/azure-sdk-for-go/storage/table.go | 149 ++- .../storage/table_entities.go | 48 +- .../storage/tableserviceclient.go | 20 + .../Azure/azure-sdk-for-go/storage/version.go | 5 + vendor/github.com/Azure/go-autorest/LICENSE | 191 +++ vendor/github.com/Azure/go-autorest/README.md | 132 +++ .../Azure/go-autorest/autorest/autorest.go | 115 ++ .../Azure/go-autorest/autorest/azure/async.go | 308 +++++ .../Azure/go-autorest/autorest/azure/azure.go | 180 +++ .../go-autorest/autorest/azure/config.go | 13 + .../go-autorest/autorest/azure/devicetoken.go 
| 193 ++++ .../autorest/azure/environments.go | 167 +++ .../go-autorest/autorest/azure/persist.go | 59 + .../Azure/go-autorest/autorest/azure/token.go | 363 ++++++ .../Azure/go-autorest/autorest/client.go | 235 ++++ .../Azure/go-autorest/autorest/date/date.go | 82 ++ .../Azure/go-autorest/autorest/date/time.go | 89 ++ .../go-autorest/autorest/date/timerfc1123.go | 86 ++ .../go-autorest/autorest/date/utility.go | 11 + .../Azure/go-autorest/autorest/error.go | 80 ++ .../Azure/go-autorest/autorest/preparer.go | 443 +++++++ .../Azure/go-autorest/autorest/responder.go | 236 ++++ .../Azure/go-autorest/autorest/sender.go | 270 +++++ .../Azure/go-autorest/autorest/utility.go | 178 +++ .../Azure/go-autorest/autorest/version.go | 23 + vendor/github.com/dgrijalva/jwt-go/LICENSE | 8 + vendor/github.com/dgrijalva/jwt-go/README.md | 85 ++ vendor/github.com/dgrijalva/jwt-go/claims.go | 134 +++ vendor/github.com/dgrijalva/jwt-go/doc.go | 4 + vendor/github.com/dgrijalva/jwt-go/ecdsa.go | 147 +++ .../dgrijalva/jwt-go/ecdsa_utils.go | 67 ++ vendor/github.com/dgrijalva/jwt-go/errors.go | 59 + vendor/github.com/dgrijalva/jwt-go/hmac.go | 94 ++ .../github.com/dgrijalva/jwt-go/map_claims.go | 94 ++ vendor/github.com/dgrijalva/jwt-go/none.go | 52 + vendor/github.com/dgrijalva/jwt-go/parser.go | 131 +++ vendor/github.com/dgrijalva/jwt-go/rsa.go | 100 ++ vendor/github.com/dgrijalva/jwt-go/rsa_pss.go | 126 ++ .../github.com/dgrijalva/jwt-go/rsa_utils.go | 69 ++ .../dgrijalva/jwt-go/signing_method.go | 35 + vendor/github.com/dgrijalva/jwt-go/token.go | 108 ++ 58 files changed, 7158 insertions(+), 1674 deletions(-) delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/README.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/container.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/share.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/version.go create mode 100644 vendor/github.com/Azure/go-autorest/LICENSE create mode 100644 vendor/github.com/Azure/go-autorest/README.md create mode 100644 vendor/github.com/Azure/go-autorest/autorest/autorest.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/async.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/azure.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/config.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/devicetoken.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/environments.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/persist.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/token.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/client.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/date.go create 
mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/time.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/utility.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/error.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/preparer.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/responder.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/sender.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/utility.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/version.go create mode 100644 vendor/github.com/dgrijalva/jwt-go/LICENSE create mode 100644 vendor/github.com/dgrijalva/jwt-go/README.md create mode 100644 vendor/github.com/dgrijalva/jwt-go/claims.go create mode 100644 vendor/github.com/dgrijalva/jwt-go/doc.go create mode 100644 vendor/github.com/dgrijalva/jwt-go/ecdsa.go create mode 100644 vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go create mode 100644 vendor/github.com/dgrijalva/jwt-go/errors.go create mode 100644 vendor/github.com/dgrijalva/jwt-go/hmac.go create mode 100644 vendor/github.com/dgrijalva/jwt-go/map_claims.go create mode 100644 vendor/github.com/dgrijalva/jwt-go/none.go create mode 100644 vendor/github.com/dgrijalva/jwt-go/parser.go create mode 100644 vendor/github.com/dgrijalva/jwt-go/rsa.go create mode 100644 vendor/github.com/dgrijalva/jwt-go/rsa_pss.go create mode 100644 vendor/github.com/dgrijalva/jwt-go/rsa_utils.go create mode 100644 vendor/github.com/dgrijalva/jwt-go/signing_method.go create mode 100644 vendor/github.com/dgrijalva/jwt-go/token.go diff --git a/registry/storage/driver/azure/azure.go b/registry/storage/driver/azure/azure.go index cfffad1c..45d2b1e5 100644 --- a/registry/storage/driver/azure/azure.go +++ b/registry/storage/driver/azure/azure.go @@ -86,7 +86,8 @@ func New(accountName, accountKey, container, realm string) (*Driver, error) { blobClient := api.GetBlobService() // Create registry container - if _, err = blobClient.CreateContainerIfNotExists(container, azure.ContainerAccessTypePrivate); err != nil { + containerRef := blobClient.GetContainerReference(container) + if _, err = containerRef.CreateIfNotExists(); err != nil { return nil, err } @@ -237,7 +238,9 @@ func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, if !strings.HasSuffix(virtContainerPath, "/") { virtContainerPath += "/" } - blobs, err := d.client.ListBlobs(d.container, azure.ListBlobsParameters{ + + containerRef := d.client.GetContainerReference(d.container) + blobs, err := containerRef.ListBlobs(azure.ListBlobsParameters{ Prefix: virtContainerPath, MaxResults: 1, }) @@ -374,8 +377,9 @@ func (d *driver) listBlobs(container, virtPath string) ([]string, error) { out := []string{} marker := "" + containerRef := d.client.GetContainerReference(d.container) for { - resp, err := d.client.ListBlobs(d.container, azure.ListBlobsParameters{ + resp, err := containerRef.ListBlobs(azure.ListBlobsParameters{ Marker: marker, Prefix: virtPath, }) diff --git a/vendor.conf b/vendor.conf index ecb56d05..5b12dc49 100644 --- a/vendor.conf +++ b/vendor.conf @@ -1,4 +1,5 @@ -github.com/Azure/azure-sdk-for-go c6f0533defaaaa26ea4dff3c9774e36033088112 +github.com/Azure/azure-sdk-for-go 088007b3b08cc02b27f2eadfdcd870958460ce7e +github.com/Azure/go-autorest ec5f4903f77ed9927ac95b19ab8e44ada64c1356 github.com/Sirupsen/logrus d26492970760ca5d33129d2d799e34be5c4782eb 
github.com/aws/aws-sdk-go c6fc52983ea2375810aa38ddb5370e9cdf611716 github.com/bshuster-repo/logrus-logstash-hook 5f729f2fb50a301153cae84ff5c58981d51c095a @@ -6,6 +7,7 @@ github.com/bugsnag/bugsnag-go b1d153021fcd90ca3f080db36bec96dc690fb274 github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702 github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782 github.com/denverdino/aliyungo afedced274aa9a7fcdd47ac97018f0f8db4e5de2 +github.com/dgrijalva/jwt-go a601269ab70c205d26370c16f7c81e9017c14e04 github.com/docker/goamz f0a21f5b2e12f83a505ecf79b633bb2035cf6f85 github.com/docker/libtrust fa567046d9b14f6aa788882a950d69651d230b21 github.com/garyburd/redigo 535138d7bcd717d6531c701ef5933d98b1866257 diff --git a/vendor/github.com/Azure/azure-sdk-for-go/README.md b/vendor/github.com/Azure/azure-sdk-for-go/README.md index 13d3c4b8..8de42e2d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/README.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/README.md @@ -1,116 +1,60 @@ # Microsoft Azure SDK for Go +[![GoDoc](https://godoc.org/github.com/Azure/azure-sdk-for-go?status.svg)](https://godoc.org/github.com/Azure/azure-sdk-for-go) +[![Build Status](https://travis-ci.org/Azure/azure-sdk-for-go.svg?branch=master)](https://travis-ci.org/Azure/azure-sdk-for-go) +[![Go Report Card](https://goreportcard.com/badge/github.com/Azure/azure-sdk-for-go)](https://goreportcard.com/report/github.com/Azure/azure-sdk-for-go) -This project provides various Go packages to perform operations -on Microsoft Azure REST APIs. -[![GoDoc](https://godoc.org/github.com/Azure/azure-sdk-for-go?status.svg)](https://godoc.org/github.com/Azure/azure-sdk-for-go) [![Build Status](https://travis-ci.org/Azure/azure-sdk-for-go.svg?branch=master)](https://travis-ci.org/Azure/azure-sdk-for-go) +This is Microsoft Azure's core repository for hosting Go packages which offer a more convenient way of targeting Azure +REST endpoints. Here, you'll find a mix of code generated by [Autorest](https://github.com/Azure/autorest) and hand +maintained packages. -> **NOTE:** This repository is under heavy ongoing development and -is likely to break over time. We currently do not have any releases -yet. If you are planning to use the repository, please consider vendoring -the packages in your project and update them when a stable tag is out. 
- -# Packages - -## Azure Resource Manager (ARM) - -[About ARM](/arm/README.md) - -- [analysisservices](/arm/analysisservices) -- [authorization](/arm/authorization) -- [batch](/arm/batch) -- [cdn](/arm/cdn) -- [cognitiveservices](/arm/cognitiveservices) -- [commerce](/arm/commerce) -- [compute](/arm/compute) -- [containerregistry](/arm/containerregistry) -- [containerservice](/arm/containerservice) -- [datalake-analytics/account](/arm/datalake-analytics/account) -- [datalake-store/account](/arm/datalake-store/account) -- [devtestlabs](/arm/devtestlabs) -- [dns](/arm/dns) -- [documentdb](/arm/documentdb) -- [eventhub](/arm/eventhub) -- [intune](/arm/intune) -- [iothub](/arm/iothub) -- [keyvault](/arm/keyvault) -- [logic](/arm/logic) -- [machinelearning/commitmentplans](/arm/machinelearning/commitmentplans) -- [machinelearning/webservices](/arm/machinelearning/webservices) -- [mediaservices](/arm/mediaservices) -- [mobileengagement](/arm/mobileengagement) -- [network](/arm/network) -- [notificationhubs](/arm/notificationhubs) -- [powerbiembedded](/arm/powerbiembedded) -- [recoveryservices](/arm/recoveryservices) -- [redis](/arm/redis) -- [resources/features](/arm/resources/features) -- [resources/links](/arm/resources/links) -- [resources/locks](/arm/resources/locks) -- [resources/policy](/arm/resources/policy) -- [resources/resources](/arm/resources/resources) -- [resources/subscriptions](/arm/resources/subscriptions) -- [scheduler](/arm/scheduler) -- [search](/arm/search) -- [servermanagement](/arm/servermanagement) -- [servicebus](/arm/servicebus) -- [sql](/arm/sql) -- [storage](/arm/storage) -- [trafficmanager](/arm/trafficmanager) -- [web](/arm/web) - -## Azure Service Management (ASM), aka classic deployment - -[About ASM](/management/README.md) - -- [affinitygroup](/management/affinitygroup) -- [hostedservice](/management/hostedservice) -- [location](/management/location) -- [networksecuritygroup](/management/networksecuritygroup) -- [osimage](/management/osimage) -- [sql](/management/sql) -- [storageservice](/management/storageservice) -- [virtualmachine](/management/virtualmachine) -- [virtualmachinedisk](/management/virtualmachinedisk) -- [virtualmachineimage](/management/virtualmachineimage) -- [virtualnetwork](/management/virtualnetwork) -- [vmutils](/management/vmutils) - -## Azure Storage SDK for Go - -[About Storage](/storage/README.md) - -- [storage](/storage) +> **NOTE:** This repository is under heavy ongoing development and should be considered a preview. Vendoring your +dependencies is always a good idea, but it is doubly important if you're consuming this library. # Installation - -- [Install Go 1.7](https://golang.org/dl/). - +- If you don't already have it, install [the Go Programming Language](https://golang.org/dl/). - Go get the SDK: ``` -$ go get -d github.com/Azure/azure-sdk-for-go +$ go get -u github.com/Azure/azure-sdk-for-go ``` -> **IMPORTANT:** We highly suggest vendoring Azure SDK for Go as a dependency. For vendoring dependencies, Azure SDK for Go uses [glide](https://github.com/Masterminds/glide). If you haven't already, install glide. Navigate to your project directory and install the dependencies. +> **IMPORTANT:** We highly suggest vendoring Azure SDK for Go as a dependency. For vendoring dependencies, Azure SDK +for Go uses [glide](https://github.com/Masterminds/glide). 
-``` -$ cd your/project -$ glide create -$ glide install -``` +# Versioning +## SDK Versions +The tags in this repository are based on, but do not conform to [SemVer.org's recommendations](http://semver.org/). +For now, the "-beta" tag is an indicator that we are still in preview and still are planning on releasing some breaking +changes. + +In repositories that are children of this one, [storage for example](https://github.com/Azure/azure-storage-go), we +have adopted SemVer.org's recommendations. + +## Azure Versions +Azure services _mostly_ do not use SemVer based versions. Rather, they use profiles identified by dates. One will often +see this casually referred to as an "APIVersion". At the moment, our SDK only supports the most recent profiles. In +order to lock to an API version, one must also lock to an SDK version. However, as discussed in +[#517](https://github.com/Azure/azure-sdk-for-go/issues/517), our objective is to reorganize and publish independent +packages for each profile. In that way, we'll be able to have parallel support in a single SDK version for all +APIVersions supported by Azure. # Documentation -Read the Godoc of the repository at [Godoc.org](http://godoc.org/github.com/Azure/azure-sdk-for-go/). - -# Contribute - -If you would like to become an active contributor to this project please follow the instructions provided in [Microsoft Azure Projects Contribution Guidelines](http://azure.github.io/guidelines/). +- Azure SDK for Go Documentation is available at [GoDoc.org](http://godoc.org/github.com/Azure/azure-sdk-for-go/). +- Azure REST APIs used by packages in this repository are documented at [Microsoft Docs, Azure REST](https://docs.microsoft.com/en-us/rest/api/). +- Azure Services are discussed in detail at [Microsoft Docs, Azure Services](https://docs.microsoft.com/en-us/azure/#pivot=services). # License This project is published under [Apache 2.0 License](LICENSE). ------ -This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. + +# Contribute + +If you would like to become an active contributor to this project please follow the instructions provided in [Microsoft +Azure Projects Contribution Guidelines](http://azure.github.io/guidelines/). + +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). +For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact +[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md b/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md deleted file mode 100644 index 0ab09984..00000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Azure Storage SDK for Go - -The `github.com/Azure/azure-sdk-for-go/storage` package is used to perform operations in Azure Storage Service. To manage your storage accounts (Azure Resource Manager / ARM), use the [github.com/Azure/azure-sdk-for-go/arm/storage](../arm/storage) package. 
For your classic storage accounts (Azure Service Management / ASM), use [github.com/Azure/azure-sdk-for-go/management/storageservice](../management/storageservice) package. - -This package includes support for [Azure Storage Emulator](https://azure.microsoft.com/documentation/articles/storage-use-emulator/) \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go new file mode 100644 index 00000000..89a0d0b3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go @@ -0,0 +1,223 @@ +// Package storage provides clients for Microsoft Azure Storage Services. +package storage + +import ( + "bytes" + "fmt" + "net/url" + "sort" + "strings" +) + +// See: https://docs.microsoft.com/rest/api/storageservices/fileservices/authentication-for-the-azure-storage-services + +type authentication string + +const ( + sharedKey authentication = "sharedKey" + sharedKeyForTable authentication = "sharedKeyTable" + sharedKeyLite authentication = "sharedKeyLite" + sharedKeyLiteForTable authentication = "sharedKeyLiteTable" + + // headers + headerAuthorization = "Authorization" + headerContentLength = "Content-Length" + headerDate = "Date" + headerXmsDate = "x-ms-date" + headerXmsVersion = "x-ms-version" + headerContentEncoding = "Content-Encoding" + headerContentLanguage = "Content-Language" + headerContentType = "Content-Type" + headerContentMD5 = "Content-MD5" + headerIfModifiedSince = "If-Modified-Since" + headerIfMatch = "If-Match" + headerIfNoneMatch = "If-None-Match" + headerIfUnmodifiedSince = "If-Unmodified-Since" + headerRange = "Range" +) + +func (c *Client) addAuthorizationHeader(verb, url string, headers map[string]string, auth authentication) (map[string]string, error) { + authHeader, err := c.getSharedKey(verb, url, headers, auth) + if err != nil { + return nil, err + } + headers[headerAuthorization] = authHeader + return headers, nil +} + +func (c *Client) getSharedKey(verb, url string, headers map[string]string, auth authentication) (string, error) { + canRes, err := c.buildCanonicalizedResource(url, auth) + if err != nil { + return "", err + } + + canString, err := buildCanonicalizedString(verb, headers, canRes, auth) + if err != nil { + return "", err + } + return c.createAuthorizationHeader(canString, auth), nil +} + +func (c *Client) buildCanonicalizedResource(uri string, auth authentication) (string, error) { + errMsg := "buildCanonicalizedResource error: %s" + u, err := url.Parse(uri) + if err != nil { + return "", fmt.Errorf(errMsg, err.Error()) + } + + cr := bytes.NewBufferString("/") + cr.WriteString(c.getCanonicalizedAccountName()) + + if len(u.Path) > 0 { + // Any portion of the CanonicalizedResource string that is derived from + // the resource's URI should be encoded exactly as it is in the URI. 
+ // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx + cr.WriteString(u.EscapedPath()) + } + + params, err := url.ParseQuery(u.RawQuery) + if err != nil { + return "", fmt.Errorf(errMsg, err.Error()) + } + + // See https://github.com/Azure/azure-storage-net/blob/master/Lib/Common/Core/Util/AuthenticationUtility.cs#L277 + if auth == sharedKey { + if len(params) > 0 { + cr.WriteString("\n") + + keys := []string{} + for key := range params { + keys = append(keys, key) + } + sort.Strings(keys) + + completeParams := []string{} + for _, key := range keys { + if len(params[key]) > 1 { + sort.Strings(params[key]) + } + + completeParams = append(completeParams, fmt.Sprintf("%s:%s", key, strings.Join(params[key], ","))) + } + cr.WriteString(strings.Join(completeParams, "\n")) + } + } else { + // search for "comp" parameter, if exists then add it to canonicalizedresource + if v, ok := params["comp"]; ok { + cr.WriteString("?comp=" + v[0]) + } + } + + return string(cr.Bytes()), nil +} + +func (c *Client) getCanonicalizedAccountName() string { + // since we may be trying to access a secondary storage account, we need to + // remove the -secondary part of the storage name + return strings.TrimSuffix(c.accountName, "-secondary") +} + +func buildCanonicalizedString(verb string, headers map[string]string, canonicalizedResource string, auth authentication) (string, error) { + contentLength := headers[headerContentLength] + if contentLength == "0" { + contentLength = "" + } + date := headers[headerDate] + if v, ok := headers[headerXmsDate]; ok { + if auth == sharedKey || auth == sharedKeyLite { + date = "" + } else { + date = v + } + } + var canString string + switch auth { + case sharedKey: + canString = strings.Join([]string{ + verb, + headers[headerContentEncoding], + headers[headerContentLanguage], + contentLength, + headers[headerContentMD5], + headers[headerContentType], + date, + headers[headerIfModifiedSince], + headers[headerIfMatch], + headers[headerIfNoneMatch], + headers[headerIfUnmodifiedSince], + headers[headerRange], + buildCanonicalizedHeader(headers), + canonicalizedResource, + }, "\n") + case sharedKeyForTable: + canString = strings.Join([]string{ + verb, + headers[headerContentMD5], + headers[headerContentType], + date, + canonicalizedResource, + }, "\n") + case sharedKeyLite: + canString = strings.Join([]string{ + verb, + headers[headerContentMD5], + headers[headerContentType], + date, + buildCanonicalizedHeader(headers), + canonicalizedResource, + }, "\n") + case sharedKeyLiteForTable: + canString = strings.Join([]string{ + date, + canonicalizedResource, + }, "\n") + default: + return "", fmt.Errorf("%s authentication is not supported yet", auth) + } + return canString, nil +} + +func buildCanonicalizedHeader(headers map[string]string) string { + cm := make(map[string]string) + + for k, v := range headers { + headerName := strings.TrimSpace(strings.ToLower(k)) + if strings.HasPrefix(headerName, "x-ms-") { + cm[headerName] = v + } + } + + if len(cm) == 0 { + return "" + } + + keys := []string{} + for key := range cm { + keys = append(keys, key) + } + + sort.Strings(keys) + + ch := bytes.NewBufferString("") + + for _, key := range keys { + ch.WriteString(key) + ch.WriteRune(':') + ch.WriteString(cm[key]) + ch.WriteRune('\n') + } + + return strings.TrimSuffix(string(ch.Bytes()), "\n") +} + +func (c *Client) createAuthorizationHeader(canonicalizedString string, auth authentication) string { + signature := c.computeHmac256(canonicalizedString) + var key string + switch auth { 
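+	// Pick the scheme name Azure expects in the Authorization header: the full
+	// shared key schemes sign as "SharedKey", the Lite variants as "SharedKeyLite".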
+ case sharedKey, sharedKeyForTable: + key = "SharedKey" + case sharedKeyLite, sharedKeyLiteForTable: + key = "SharedKeyLite" + } + return fmt.Sprintf("%s %s:%s", key, c.getCanonicalizedAccountName(), signature) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go index 3dbaca52..6b332ea4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go @@ -6,7 +6,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net/http" "net/url" "strconv" @@ -14,44 +13,6 @@ import ( "time" ) -// BlobStorageClient contains operations for Microsoft Azure Blob Storage -// Service. -type BlobStorageClient struct { - client Client -} - -// A Container is an entry in ContainerListResponse. -type Container struct { - Name string `xml:"Name"` - Properties ContainerProperties `xml:"Properties"` - // TODO (ahmetalpbalkan) Metadata -} - -// ContainerProperties contains various properties of a container returned from -// various endpoints like ListContainers. -type ContainerProperties struct { - LastModified string `xml:"Last-Modified"` - Etag string `xml:"Etag"` - LeaseStatus string `xml:"LeaseStatus"` - LeaseState string `xml:"LeaseState"` - LeaseDuration string `xml:"LeaseDuration"` - // TODO (ahmetalpbalkan) remaining fields -} - -// ContainerListResponse contains the response fields from -// ListContainers call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx -type ContainerListResponse struct { - XMLName xml.Name `xml:"EnumerationResults"` - Xmlns string `xml:"xmlns,attr"` - Prefix string `xml:"Prefix"` - Marker string `xml:"Marker"` - NextMarker string `xml:"NextMarker"` - MaxResults int64 `xml:"MaxResults"` - Containers []Container `xml:"Containers>Container"` -} - // A Blob is an entry in BlobListResponse. type Blob struct { Name string `xml:"Name"` @@ -123,6 +84,7 @@ type BlobProperties struct { CopyCompletionTime string `xml:"CopyCompletionTime"` CopyStatusDescription string `xml:"CopyStatusDescription"` LeaseStatus string `xml:"LeaseStatus"` + LeaseState string `xml:"LeaseState"` } // BlobHeaders contains various properties of a blob and is an entry @@ -135,101 +97,6 @@ type BlobHeaders struct { CacheControl string `header:"x-ms-blob-cache-control"` } -// BlobListResponse contains the response fields from ListBlobs call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx -type BlobListResponse struct { - XMLName xml.Name `xml:"EnumerationResults"` - Xmlns string `xml:"xmlns,attr"` - Prefix string `xml:"Prefix"` - Marker string `xml:"Marker"` - NextMarker string `xml:"NextMarker"` - MaxResults int64 `xml:"MaxResults"` - Blobs []Blob `xml:"Blobs>Blob"` - - // BlobPrefix is used to traverse blobs as if it were a file system. - // It is returned if ListBlobsParameters.Delimiter is specified. - // The list here can be thought of as "folders" that may contain - // other folders or blobs. - BlobPrefixes []string `xml:"Blobs>BlobPrefix>Name"` - - // Delimiter is used to traverse blobs as if it were a file system. - // It is returned if ListBlobsParameters.Delimiter is specified. - Delimiter string `xml:"Delimiter"` -} - -// ListContainersParameters defines the set of customizable parameters to make a -// List Containers call. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx -type ListContainersParameters struct { - Prefix string - Marker string - Include string - MaxResults uint - Timeout uint -} - -func (p ListContainersParameters) getParameters() url.Values { - out := url.Values{} - - if p.Prefix != "" { - out.Set("prefix", p.Prefix) - } - if p.Marker != "" { - out.Set("marker", p.Marker) - } - if p.Include != "" { - out.Set("include", p.Include) - } - if p.MaxResults != 0 { - out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults)) - } - if p.Timeout != 0 { - out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) - } - - return out -} - -// ListBlobsParameters defines the set of customizable -// parameters to make a List Blobs call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx -type ListBlobsParameters struct { - Prefix string - Delimiter string - Marker string - Include string - MaxResults uint - Timeout uint -} - -func (p ListBlobsParameters) getParameters() url.Values { - out := url.Values{} - - if p.Prefix != "" { - out.Set("prefix", p.Prefix) - } - if p.Delimiter != "" { - out.Set("delimiter", p.Delimiter) - } - if p.Marker != "" { - out.Set("marker", p.Marker) - } - if p.Include != "" { - out.Set("include", p.Include) - } - if p.MaxResults != 0 { - out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults)) - } - if p.Timeout != 0 { - out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) - } - - return out -} - // BlobType defines the type of the Azure Blob. type BlobType string @@ -260,7 +127,7 @@ const ( // lease constants. const ( leaseHeaderPrefix = "x-ms-lease-" - leaseID = "x-ms-lease-id" + headerLeaseID = "x-ms-lease-id" leaseAction = "x-ms-lease-action" leaseBreakPeriod = "x-ms-lease-break-period" leaseDuration = "x-ms-lease-duration" @@ -288,79 +155,6 @@ const ( BlockListTypeUncommitted BlockListType = "uncommitted" ) -// ContainerAccessType defines the access level to the container from a public -// request. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx and "x-ms- -// blob-public-access" header. -type ContainerAccessType string - -// Access options for containers -const ( - ContainerAccessTypePrivate ContainerAccessType = "" - ContainerAccessTypeBlob ContainerAccessType = "blob" - ContainerAccessTypeContainer ContainerAccessType = "container" -) - -// ContainerAccessOptions are used when setting ACLs of containers (after creation) -type ContainerAccessOptions struct { - ContainerAccess ContainerAccessType - Timeout int - LeaseID string -} - -// AccessPolicyDetails are used for SETTING policies -type AccessPolicyDetails struct { - ID string - StartTime time.Time - ExpiryTime time.Time - CanRead bool - CanWrite bool - CanDelete bool -} - -// ContainerPermissions is used when setting permissions and Access Policies for containers. -type ContainerPermissions struct { - AccessOptions ContainerAccessOptions - AccessPolicy AccessPolicyDetails -} - -// AccessPolicyDetailsXML has specifics about an access policy -// annotated with XML details. -type AccessPolicyDetailsXML struct { - StartTime time.Time `xml:"Start"` - ExpiryTime time.Time `xml:"Expiry"` - Permission string `xml:"Permission"` -} - -// SignedIdentifier is a wrapper for a specific policy -type SignedIdentifier struct { - ID string `xml:"Id"` - AccessPolicy AccessPolicyDetailsXML `xml:"AccessPolicy"` -} - -// SignedIdentifiers part of the response from GetPermissions call. 
-type SignedIdentifiers struct { - SignedIdentifiers []SignedIdentifier `xml:"SignedIdentifier"` -} - -// AccessPolicy is the response type from the GetPermissions call. -type AccessPolicy struct { - SignedIdentifiersList SignedIdentifiers `xml:"SignedIdentifiers"` -} - -// ContainerAccessResponse is returned for the GetContainerPermissions function. -// This contains both the permission and access policy for the container. -type ContainerAccessResponse struct { - ContainerAccess ContainerAccessType - AccessPolicy SignedIdentifiers -} - -// ContainerAccessHeader references header used when setting/getting container ACL -const ( - ContainerAccessHeader string = "x-ms-blob-public-access" -) - // Maximum sizes (per REST API) for various concepts const ( MaxBlobBlockSize = 4 * 1024 * 1024 @@ -401,7 +195,7 @@ type BlockResponse struct { Size int64 `xml:"Size"` } -// GetPageRangesResponse contains the reponse fields from +// GetPageRangesResponse contains the response fields from // Get Page Ranges call. // // See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx @@ -424,244 +218,14 @@ var ( errBlobCopyIDMismatch = errors.New("storage: blob copy id is a mismatch") ) -// ListContainers returns the list of containers in a storage account along with -// pagination token and other response details. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx -func (b BlobStorageClient) ListContainers(params ListContainersParameters) (ContainerListResponse, error) { - q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}}) - uri := b.client.getEndpoint(blobServiceName, "", q) - headers := b.client.getStandardHeaders() - - var out ContainerListResponse - resp, err := b.client.exec("GET", uri, headers, nil) - if err != nil { - return out, err - } - defer resp.body.Close() - - err = xmlUnmarshal(resp.body, &out) - return out, err -} - -// CreateContainer creates a blob container within the storage account -// with given name and access level. Returns error if container already exists. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx -func (b BlobStorageClient) CreateContainer(name string, access ContainerAccessType) error { - resp, err := b.createContainer(name, access) - if err != nil { - return err - } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// CreateContainerIfNotExists creates a blob container if it does not exist. Returns -// true if container is newly created or false if container already exists. -func (b BlobStorageClient) CreateContainerIfNotExists(name string, access ContainerAccessType) (bool, error) { - resp, err := b.createContainer(name, access) - if resp != nil { - defer resp.body.Close() - if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict { - return resp.statusCode == http.StatusCreated, nil - } - } - return false, err -} - -func (b BlobStorageClient) createContainer(name string, access ContainerAccessType) (*storageResponse, error) { - verb := "PUT" - uri := b.client.getEndpoint(blobServiceName, pathForContainer(name), url.Values{"restype": {"container"}}) - - headers := b.client.getStandardHeaders() - if access != "" { - headers[ContainerAccessHeader] = string(access) - } - return b.client.exec(verb, uri, headers, nil) -} - -// ContainerExists returns true if a container with given name exists -// on the storage account, otherwise returns false. 
-func (b BlobStorageClient) ContainerExists(name string) (bool, error) { - verb := "HEAD" - uri := b.client.getEndpoint(blobServiceName, pathForContainer(name), url.Values{"restype": {"container"}}) - headers := b.client.getStandardHeaders() - - resp, err := b.client.exec(verb, uri, headers, nil) - if resp != nil { - defer resp.body.Close() - if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound { - return resp.statusCode == http.StatusOK, nil - } - } - return false, err -} - -// SetContainerPermissions sets up container permissions as per https://msdn.microsoft.com/en-us/library/azure/dd179391.aspx -func (b BlobStorageClient) SetContainerPermissions(container string, containerPermissions ContainerPermissions) (err error) { - params := url.Values{ - "restype": {"container"}, - "comp": {"acl"}, - } - - if containerPermissions.AccessOptions.Timeout > 0 { - params.Add("timeout", strconv.Itoa(containerPermissions.AccessOptions.Timeout)) - } - - uri := b.client.getEndpoint(blobServiceName, pathForContainer(container), params) - headers := b.client.getStandardHeaders() - if containerPermissions.AccessOptions.ContainerAccess != "" { - headers[ContainerAccessHeader] = string(containerPermissions.AccessOptions.ContainerAccess) - } - - if containerPermissions.AccessOptions.LeaseID != "" { - headers[leaseID] = containerPermissions.AccessOptions.LeaseID - } - - // generate the XML for the SharedAccessSignature if required. - accessPolicyXML, err := generateAccessPolicy(containerPermissions.AccessPolicy) - if err != nil { - return err - } - - var resp *storageResponse - if accessPolicyXML != "" { - headers["Content-Length"] = strconv.Itoa(len(accessPolicyXML)) - resp, err = b.client.exec("PUT", uri, headers, strings.NewReader(accessPolicyXML)) - } else { - resp, err = b.client.exec("PUT", uri, headers, nil) - } - - if err != nil { - return err - } - - if resp != nil { - defer func() { - err = resp.body.Close() - }() - - if resp.statusCode != http.StatusOK { - return errors.New("Unable to set permissions") - } - } - return nil -} - -// GetContainerPermissions gets the container permissions as per https://msdn.microsoft.com/en-us/library/azure/dd179469.aspx -// If timeout is 0 then it will not be passed to Azure -// leaseID will only be passed to Azure if populated -// Returns permissionResponse which is combined permissions and AccessPolicy -func (b BlobStorageClient) GetContainerPermissions(container string, timeout int, leaseID string) (permissionResponse *ContainerAccessResponse, err error) { - params := url.Values{"restype": {"container"}, - "comp": {"acl"}} - - if timeout > 0 { - params.Add("timeout", strconv.Itoa(timeout)) - } - - uri := b.client.getEndpoint(blobServiceName, pathForContainer(container), params) - headers := b.client.getStandardHeaders() - - if leaseID != "" { - headers[leaseID] = leaseID - } - - resp, err := b.client.exec("GET", uri, headers, nil) - if err != nil { - return nil, err - } - - // containerAccess. 
Blob, Container, empty - containerAccess := resp.headers.Get(http.CanonicalHeaderKey(ContainerAccessHeader)) - - defer func() { - err = resp.body.Close() - }() - - var out AccessPolicy - err = xmlUnmarshal(resp.body, &out.SignedIdentifiersList) - if err != nil { - return nil, err - } - - permissionResponse = &ContainerAccessResponse{} - permissionResponse.AccessPolicy = out.SignedIdentifiersList - permissionResponse.ContainerAccess = ContainerAccessType(containerAccess) - - return permissionResponse, nil -} - -// DeleteContainer deletes the container with given name on the storage -// account. If the container does not exist returns error. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx -func (b BlobStorageClient) DeleteContainer(name string) error { - resp, err := b.deleteContainer(name) - if err != nil { - return err - } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) -} - -// DeleteContainerIfExists deletes the container with given name on the storage -// account if it exists. Returns true if container is deleted with this call, or -// false if the container did not exist at the time of the Delete Container -// operation. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx -func (b BlobStorageClient) DeleteContainerIfExists(name string) (bool, error) { - resp, err := b.deleteContainer(name) - if resp != nil { - defer resp.body.Close() - if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { - return resp.statusCode == http.StatusAccepted, nil - } - } - return false, err -} - -func (b BlobStorageClient) deleteContainer(name string) (*storageResponse, error) { - verb := "DELETE" - uri := b.client.getEndpoint(blobServiceName, pathForContainer(name), url.Values{"restype": {"container"}}) - - headers := b.client.getStandardHeaders() - return b.client.exec(verb, uri, headers, nil) -} - -// ListBlobs returns an object that contains list of blobs in the container, -// pagination token and other information in the response of List Blobs call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx -func (b BlobStorageClient) ListBlobs(container string, params ListBlobsParameters) (BlobListResponse, error) { - q := mergeParams(params.getParameters(), url.Values{ - "restype": {"container"}, - "comp": {"list"}}) - uri := b.client.getEndpoint(blobServiceName, pathForContainer(container), q) - headers := b.client.getStandardHeaders() - - var out BlobListResponse - resp, err := b.client.exec("GET", uri, headers, nil) - if err != nil { - return out, err - } - defer resp.body.Close() - - err = xmlUnmarshal(resp.body, &out) - return out, err -} - // BlobExists returns true if a blob with given name exists on the specified // container of the storage account. 
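+// It issues a HEAD request and maps a 200 response to true and a 404 to false.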
func (b BlobStorageClient) BlobExists(container, name string) (bool, error) { - verb := "HEAD" uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) headers := b.client.getStandardHeaders() - resp, err := b.client.exec(verb, uri, headers, nil) + resp, err := b.client.exec(http.MethodHead, uri, headers, nil, b.auth) if resp != nil { - defer resp.body.Close() + defer readAndCloseBody(resp.body) if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound { return resp.statusCode == http.StatusOK, nil } @@ -670,14 +234,15 @@ func (b BlobStorageClient) BlobExists(container, name string) (bool, error) { } // GetBlobURL gets the canonical URL to the blob with the specified name in the -// specified container. This method does not create a publicly accessible URL if -// the blob or container is private and this method does not check if the blob -// exists. +// specified container. If name is not specified, the canonical URL for the entire +// container is obtained. +// This method does not create a publicly accessible URL if the blob or container +// is private and this method does not check if the blob exists. func (b BlobStorageClient) GetBlobURL(container, name string) string { if container == "" { container = "$root" } - return b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) + return b.client.getEndpoint(blobServiceName, pathForResource(container, name), url.Values{}) } // GetBlob returns a stream to read the blob. Caller must call Close() the @@ -713,9 +278,9 @@ func (b BlobStorageClient) GetBlobRange(container, name, bytesRange string, extr } func (b BlobStorageClient) getBlobRange(container, name, bytesRange string, extraHeaders map[string]string) (*storageResponse, error) { - verb := "GET" uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) + extraHeaders = b.client.protectUserAgent(extraHeaders) headers := b.client.getStandardHeaders() if bytesRange != "" { headers["Range"] = fmt.Sprintf("bytes=%s", bytesRange) @@ -725,23 +290,23 @@ func (b BlobStorageClient) getBlobRange(container, name, bytesRange string, extr headers[k] = v } - resp, err := b.client.exec(verb, uri, headers, nil) + resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth) if err != nil { return nil, err } return resp, err } -// leasePut is common PUT code for the various aquire/release/break etc functions. +// leasePut is common PUT code for the various acquire/release/break etc functions. 
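+// It PUTs to the blob's "?comp=lease" endpoint, verifies the expected status
+// code, and returns the response headers.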
func (b BlobStorageClient) leaseCommonPut(container string, name string, headers map[string]string, expectedStatus int) (http.Header, error) { params := url.Values{"comp": {"lease"}} uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) - resp, err := b.client.exec("PUT", uri, headers, nil) + resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth) if err != nil { return nil, err } - defer resp.body.Close() + defer readAndCloseBody(resp.body) if err := checkRespCode(resp.statusCode, []int{expectedStatus}); err != nil { return nil, err @@ -752,6 +317,7 @@ func (b BlobStorageClient) leaseCommonPut(container string, name string, headers // SnapshotBlob creates a snapshot for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691971.aspx func (b BlobStorageClient) SnapshotBlob(container string, name string, timeout int, extraHeaders map[string]string) (snapshotTimestamp *time.Time, err error) { + extraHeaders = b.client.protectUserAgent(extraHeaders) headers := b.client.getStandardHeaders() params := url.Values{"comp": {"snapshot"}} @@ -764,11 +330,13 @@ func (b BlobStorageClient) SnapshotBlob(container string, name string, timeout i } uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) - resp, err := b.client.exec("PUT", uri, headers, nil) - if err != nil { + resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth) + if err != nil || resp == nil { return nil, err } + defer readAndCloseBody(resp.body) + if err := checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil { return nil, err } @@ -788,14 +356,22 @@ func (b BlobStorageClient) SnapshotBlob(container string, name string, timeout i // AcquireLease creates a lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx // returns leaseID acquired +// In API Versions starting on 2012-02-12, the minimum leaseTimeInSeconds is 15, the maximum +// non-infinite leaseTimeInSeconds is 60. To specify an infinite lease, provide the value -1. func (b BlobStorageClient) AcquireLease(container string, name string, leaseTimeInSeconds int, proposedLeaseID string) (returnedLeaseID string, err error) { headers := b.client.getStandardHeaders() headers[leaseAction] = acquireLease - if leaseTimeInSeconds > 0 { - headers[leaseDuration] = strconv.Itoa(leaseTimeInSeconds) + if leaseTimeInSeconds == -1 { + // Do nothing, but don't trigger the following clauses. 
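+		// -1 is passed through unchanged, which requests an infinite lease.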
+ } else if leaseTimeInSeconds > 60 || b.client.apiVersion < "2012-02-12" { + leaseTimeInSeconds = 60 + } else if leaseTimeInSeconds < 15 { + leaseTimeInSeconds = 15 } + headers[leaseDuration] = strconv.Itoa(leaseTimeInSeconds) + if proposedLeaseID != "" { headers[leaseProposedID] = proposedLeaseID } @@ -805,7 +381,7 @@ func (b BlobStorageClient) AcquireLease(container string, name string, leaseTime return "", err } - returnedLeaseID = respHeaders.Get(http.CanonicalHeaderKey(leaseID)) + returnedLeaseID = respHeaders.Get(http.CanonicalHeaderKey(headerLeaseID)) if returnedLeaseID != "" { return returnedLeaseID, nil @@ -856,7 +432,7 @@ func (b BlobStorageClient) breakLeaseCommon(container string, name string, heade func (b BlobStorageClient) ChangeLease(container string, name string, currentLeaseID string, proposedLeaseID string) (newLeaseID string, err error) { headers := b.client.getStandardHeaders() headers[leaseAction] = changeLease - headers[leaseID] = currentLeaseID + headers[headerLeaseID] = currentLeaseID headers[leaseProposedID] = proposedLeaseID respHeaders, err := b.leaseCommonPut(container, name, headers, http.StatusOK) @@ -864,7 +440,7 @@ func (b BlobStorageClient) ChangeLease(container string, name string, currentLea return "", err } - newLeaseID = respHeaders.Get(http.CanonicalHeaderKey(leaseID)) + newLeaseID = respHeaders.Get(http.CanonicalHeaderKey(headerLeaseID)) if newLeaseID != "" { return newLeaseID, nil } @@ -876,7 +452,7 @@ func (b BlobStorageClient) ChangeLease(container string, name string, currentLea func (b BlobStorageClient) ReleaseLease(container string, name string, currentLeaseID string) error { headers := b.client.getStandardHeaders() headers[leaseAction] = releaseLease - headers[leaseID] = currentLeaseID + headers[headerLeaseID] = currentLeaseID _, err := b.leaseCommonPut(container, name, headers, http.StatusOK) if err != nil { @@ -890,7 +466,7 @@ func (b BlobStorageClient) ReleaseLease(container string, name string, currentLe func (b BlobStorageClient) RenewLease(container string, name string, currentLeaseID string) error { headers := b.client.getStandardHeaders() headers[leaseAction] = renewLease - headers[leaseID] = currentLeaseID + headers[headerLeaseID] = currentLeaseID _, err := b.leaseCommonPut(container, name, headers, http.StatusOK) if err != nil { @@ -903,17 +479,16 @@ func (b BlobStorageClient) RenewLease(container string, name string, currentLeas // GetBlobProperties provides various information about the specified // blob. 
See https://msdn.microsoft.com/en-us/library/azure/dd179394.aspx func (b BlobStorageClient) GetBlobProperties(container, name string) (*BlobProperties, error) { - verb := "HEAD" uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) headers := b.client.getStandardHeaders() - resp, err := b.client.exec(verb, uri, headers, nil) + resp, err := b.client.exec(http.MethodHead, uri, headers, nil, b.auth) if err != nil { return nil, err } - defer resp.body.Close() + defer readAndCloseBody(resp.body) - if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { return nil, err } @@ -953,6 +528,7 @@ func (b BlobStorageClient) GetBlobProperties(container, name string) (*BlobPrope CopyStatus: resp.headers.Get("x-ms-copy-status"), BlobType: BlobType(resp.headers.Get("x-ms-blob-type")), LeaseStatus: resp.headers.Get("x-ms-lease-status"), + LeaseState: resp.headers.Get("x-ms-lease-state"), }, nil } @@ -975,11 +551,11 @@ func (b BlobStorageClient) SetBlobProperties(container, name string, blobHeaders headers[k] = v } - resp, err := b.client.exec("PUT", uri, headers, nil) + resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth) if err != nil { return err } - defer resp.body.Close() + defer readAndCloseBody(resp.body) return checkRespCode(resp.statusCode, []int{http.StatusOK}) } @@ -995,6 +571,8 @@ func (b BlobStorageClient) SetBlobProperties(container, name string, blobHeaders func (b BlobStorageClient) SetBlobMetadata(container, name string, metadata map[string]string, extraHeaders map[string]string) error { params := url.Values{"comp": {"metadata"}} uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) + metadata = b.client.protectUserAgent(metadata) + extraHeaders = b.client.protectUserAgent(extraHeaders) headers := b.client.getStandardHeaders() for k, v := range metadata { headers[userDefinedMetadataHeaderPrefix+k] = v @@ -1004,11 +582,11 @@ func (b BlobStorageClient) SetBlobMetadata(container, name string, metadata map[ headers[k] = v } - resp, err := b.client.exec("PUT", uri, headers, nil) + resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth) if err != nil { return err } - defer resp.body.Close() + defer readAndCloseBody(resp.body) return checkRespCode(resp.statusCode, []int{http.StatusOK}) } @@ -1024,11 +602,11 @@ func (b BlobStorageClient) GetBlobMetadata(container, name string) (map[string]s uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) headers := b.client.getStandardHeaders() - resp, err := b.client.exec("GET", uri, headers, nil) + resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth) if err != nil { return nil, err } - defer resp.body.Close() + defer readAndCloseBody(resp.body) if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { return nil, err @@ -1075,6 +653,7 @@ func (b BlobStorageClient) CreateBlockBlob(container, name string) error { func (b BlobStorageClient) CreateBlockBlobFromReader(container, name string, size uint64, blob io.Reader, extraHeaders map[string]string) error { path := fmt.Sprintf("%s/%s", container, name) uri := b.client.getEndpoint(blobServiceName, path, url.Values{}) + extraHeaders = b.client.protectUserAgent(extraHeaders) headers := b.client.getStandardHeaders() headers["x-ms-blob-type"] = string(BlobTypeBlock) headers["Content-Length"] = fmt.Sprintf("%d", size) @@ -1083,11 +662,11 @@ func (b 
BlobStorageClient) CreateBlockBlobFromReader(container, name string, siz headers[k] = v } - resp, err := b.client.exec("PUT", uri, headers, blob) + resp, err := b.client.exec(http.MethodPut, uri, headers, blob, b.auth) if err != nil { return err } - defer resp.body.Close() + defer readAndCloseBody(resp.body) return checkRespCode(resp.statusCode, []int{http.StatusCreated}) } @@ -1112,6 +691,7 @@ func (b BlobStorageClient) PutBlock(container, name, blockID string, chunk []byt // See https://msdn.microsoft.com/en-us/library/azure/dd135726.aspx func (b BlobStorageClient) PutBlockWithLength(container, name, blockID string, size uint64, blob io.Reader, extraHeaders map[string]string) error { uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{"comp": {"block"}, "blockid": {blockID}}) + extraHeaders = b.client.protectUserAgent(extraHeaders) headers := b.client.getStandardHeaders() headers["x-ms-blob-type"] = string(BlobTypeBlock) headers["Content-Length"] = fmt.Sprintf("%v", size) @@ -1120,12 +700,12 @@ func (b BlobStorageClient) PutBlockWithLength(container, name, blockID string, s headers[k] = v } - resp, err := b.client.exec("PUT", uri, headers, blob) + resp, err := b.client.exec(http.MethodPut, uri, headers, blob, b.auth) if err != nil { return err } - defer resp.body.Close() + defer readAndCloseBody(resp.body) return checkRespCode(resp.statusCode, []int{http.StatusCreated}) } @@ -1139,11 +719,11 @@ func (b BlobStorageClient) PutBlockList(container, name string, blocks []Block) headers := b.client.getStandardHeaders() headers["Content-Length"] = fmt.Sprintf("%v", len(blockListXML)) - resp, err := b.client.exec("PUT", uri, headers, strings.NewReader(blockListXML)) + resp, err := b.client.exec(http.MethodPut, uri, headers, strings.NewReader(blockListXML), b.auth) if err != nil { return err } - defer resp.body.Close() + defer readAndCloseBody(resp.body) return checkRespCode(resp.statusCode, []int{http.StatusCreated}) } @@ -1156,7 +736,7 @@ func (b BlobStorageClient) GetBlockList(container, name string, blockType BlockL headers := b.client.getStandardHeaders() var out BlockListResponse - resp, err := b.client.exec("GET", uri, headers, nil) + resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth) if err != nil { return out, err } @@ -1174,6 +754,7 @@ func (b BlobStorageClient) GetBlockList(container, name string, blockType BlockL func (b BlobStorageClient) PutPageBlob(container, name string, size int64, extraHeaders map[string]string) error { path := fmt.Sprintf("%s/%s", container, name) uri := b.client.getEndpoint(blobServiceName, path, url.Values{}) + extraHeaders = b.client.protectUserAgent(extraHeaders) headers := b.client.getStandardHeaders() headers["x-ms-blob-type"] = string(BlobTypePage) headers["x-ms-blob-content-length"] = fmt.Sprintf("%v", size) @@ -1182,11 +763,11 @@ func (b BlobStorageClient) PutPageBlob(container, name string, size int64, extra headers[k] = v } - resp, err := b.client.exec("PUT", uri, headers, nil) + resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth) if err != nil { return err } - defer resp.body.Close() + defer readAndCloseBody(resp.body) return checkRespCode(resp.statusCode, []int{http.StatusCreated}) } @@ -1199,6 +780,7 @@ func (b BlobStorageClient) PutPageBlob(container, name string, size int64, extra func (b BlobStorageClient) PutPage(container, name string, startByte, endByte int64, writeType PageWriteType, chunk []byte, extraHeaders map[string]string) error { path := fmt.Sprintf("%s/%s", 
container, name) uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"page"}}) + extraHeaders = b.client.protectUserAgent(extraHeaders) headers := b.client.getStandardHeaders() headers["x-ms-blob-type"] = string(BlobTypePage) headers["x-ms-page-write"] = string(writeType) @@ -1217,11 +799,11 @@ func (b BlobStorageClient) PutPage(container, name string, startByte, endByte in } headers["Content-Length"] = fmt.Sprintf("%v", contentLength) - resp, err := b.client.exec("PUT", uri, headers, data) + resp, err := b.client.exec(http.MethodPut, uri, headers, data, b.auth) if err != nil { return err } - defer resp.body.Close() + defer readAndCloseBody(resp.body) return checkRespCode(resp.statusCode, []int{http.StatusCreated}) } @@ -1235,13 +817,13 @@ func (b BlobStorageClient) GetPageRanges(container, name string) (GetPageRangesR headers := b.client.getStandardHeaders() var out GetPageRangesResponse - resp, err := b.client.exec("GET", uri, headers, nil) + resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth) if err != nil { return out, err } defer resp.body.Close() - if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { return out, err } err = xmlUnmarshal(resp.body, &out) @@ -1255,6 +837,7 @@ func (b BlobStorageClient) GetPageRanges(container, name string) (GetPageRangesR func (b BlobStorageClient) PutAppendBlob(container, name string, extraHeaders map[string]string) error { path := fmt.Sprintf("%s/%s", container, name) uri := b.client.getEndpoint(blobServiceName, path, url.Values{}) + extraHeaders = b.client.protectUserAgent(extraHeaders) headers := b.client.getStandardHeaders() headers["x-ms-blob-type"] = string(BlobTypeAppend) @@ -1262,11 +845,11 @@ func (b BlobStorageClient) PutAppendBlob(container, name string, extraHeaders ma headers[k] = v } - resp, err := b.client.exec("PUT", uri, headers, nil) + resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth) if err != nil { return err } - defer resp.body.Close() + defer readAndCloseBody(resp.body) return checkRespCode(resp.statusCode, []int{http.StatusCreated}) } @@ -1277,6 +860,7 @@ func (b BlobStorageClient) PutAppendBlob(container, name string, extraHeaders ma func (b BlobStorageClient) AppendBlock(container, name string, chunk []byte, extraHeaders map[string]string) error { path := fmt.Sprintf("%s/%s", container, name) uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"appendblock"}}) + extraHeaders = b.client.protectUserAgent(extraHeaders) headers := b.client.getStandardHeaders() headers["x-ms-blob-type"] = string(BlobTypeAppend) headers["Content-Length"] = fmt.Sprintf("%v", len(chunk)) @@ -1285,11 +869,11 @@ func (b BlobStorageClient) AppendBlock(container, name string, chunk []byte, ext headers[k] = v } - resp, err := b.client.exec("PUT", uri, headers, bytes.NewReader(chunk)) + resp, err := b.client.exec(http.MethodPut, uri, headers, bytes.NewReader(chunk), b.auth) if err != nil { return err } - defer resp.body.Close() + defer readAndCloseBody(resp.body) return checkRespCode(resp.statusCode, []int{http.StatusCreated}) } @@ -1320,11 +904,11 @@ func (b BlobStorageClient) StartBlobCopy(container, name, sourceBlob string) (st headers := b.client.getStandardHeaders() headers["x-ms-copy-source"] = sourceBlob - resp, err := b.client.exec("PUT", uri, headers, nil) + resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth) if err != nil { return "", err } - defer 
resp.body.Close() + defer readAndCloseBody(resp.body) if err := checkRespCode(resp.statusCode, []int{http.StatusAccepted, http.StatusCreated}); err != nil { return "", err @@ -1352,14 +936,14 @@ func (b BlobStorageClient) AbortBlobCopy(container, name, copyID, currentLeaseID headers["x-ms-copy-action"] = "abort" if currentLeaseID != "" { - headers[leaseID] = currentLeaseID + headers[headerLeaseID] = currentLeaseID } - resp, err := b.client.exec("PUT", uri, headers, nil) + resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth) if err != nil { return err } - defer resp.body.Close() + defer readAndCloseBody(resp.body) if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { return err @@ -1403,7 +987,7 @@ func (b BlobStorageClient) DeleteBlob(container, name string, extraHeaders map[s if err != nil { return err } - defer resp.body.Close() + defer readAndCloseBody(resp.body) return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) } @@ -1414,7 +998,7 @@ func (b BlobStorageClient) DeleteBlob(container, name string, extraHeaders map[s func (b BlobStorageClient) DeleteBlobIfExists(container, name string, extraHeaders map[string]string) (bool, error) { resp, err := b.deleteBlob(container, name, extraHeaders) if resp != nil { - defer resp.body.Close() + defer readAndCloseBody(resp.body) if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { return resp.statusCode == http.StatusAccepted, nil } @@ -1423,19 +1007,14 @@ func (b BlobStorageClient) DeleteBlobIfExists(container, name string, extraHeade } func (b BlobStorageClient) deleteBlob(container, name string, extraHeaders map[string]string) (*storageResponse, error) { - verb := "DELETE" uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) + extraHeaders = b.client.protectUserAgent(extraHeaders) headers := b.client.getStandardHeaders() for k, v := range extraHeaders { headers[k] = v } - return b.client.exec(verb, uri, headers, nil) -} - -// helper method to construct the path to a container given its name -func pathForContainer(name string) string { - return fmt.Sprintf("/%s", name) + return b.client.exec(http.MethodDelete, uri, headers, nil, b.auth) } // helper method to construct the path to a blob given its container and blob @@ -1444,8 +1023,16 @@ func pathForBlob(container, name string) string { return fmt.Sprintf("/%s/%s", container, name) } +// helper method to construct the path to either a blob or container +func pathForResource(container, name string) string { + if len(name) > 0 { + return fmt.Sprintf("/%s/%s", container, name) + } + return fmt.Sprintf("/%s", container) +} + // GetBlobSASURIWithSignedIPAndProtocol creates an URL to the specified blob which contains the Shared -// Access Signature with specified permissions and expiration time. Also includes signedIPRange and allowed procotols. +// Access Signature with specified permissions and expiration time. Also includes signedIPRange and allowed protocols. // If old API version is used but no signedIP is passed (ie empty string) then this should still work. // We only populate the signedIP when it non-empty. 
// @@ -1455,7 +1042,7 @@ func (b BlobStorageClient) GetBlobSASURIWithSignedIPAndProtocol(container, name signedPermissions = permissions blobURL = b.GetBlobURL(container, name) ) - canonicalizedResource, err := b.client.buildCanonicalizedResource(blobURL) + canonicalizedResource, err := b.client.buildCanonicalizedResource(blobURL, b.auth) if err != nil { return "", err } @@ -1473,7 +1060,12 @@ func (b BlobStorageClient) GetBlobSASURIWithSignedIPAndProtocol(container, name } signedExpiry := expiry.UTC().Format(time.RFC3339) - signedResource := "b" + + //If blob name is missing, resource is a container + signedResource := "c" + if len(name) > 0 { + signedResource = "b" + } protocols := "https,http" if HTTPSOnly { @@ -1536,61 +1128,3 @@ func blobSASStringToSign(signedVersion, canonicalizedResource, signedExpiry, sig return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15") } - -func generatePermissions(accessPolicy AccessPolicyDetails) (permissions string) { - // generate the permissions string (rwd). - // still want the end user API to have bool flags. - permissions = "" - - if accessPolicy.CanRead { - permissions += "r" - } - - if accessPolicy.CanWrite { - permissions += "w" - } - - if accessPolicy.CanDelete { - permissions += "d" - } - - return permissions -} - -// convertAccessPolicyToXMLStructs converts between AccessPolicyDetails which is a struct better for API usage to the -// AccessPolicy struct which will get converted to XML. -func convertAccessPolicyToXMLStructs(accessPolicy AccessPolicyDetails) SignedIdentifiers { - return SignedIdentifiers{ - SignedIdentifiers: []SignedIdentifier{ - { - ID: accessPolicy.ID, - AccessPolicy: AccessPolicyDetailsXML{ - StartTime: accessPolicy.StartTime.UTC().Round(time.Second), - ExpiryTime: accessPolicy.ExpiryTime.UTC().Round(time.Second), - Permission: generatePermissions(accessPolicy), - }, - }, - }, - } -} - -// generateAccessPolicy generates the XML access policy used as the payload for SetContainerPermissions. -func generateAccessPolicy(accessPolicy AccessPolicyDetails) (accessPolicyXML string, err error) { - - if accessPolicy.ID != "" { - signedIdentifiers := convertAccessPolicyToXMLStructs(accessPolicy) - body, _, err := xmlMarshal(signedIdentifiers) - if err != nil { - return "", err - } - - xmlByteArray, err := ioutil.ReadAll(body) - if err != nil { - return "", err - } - accessPolicyXML = string(xmlByteArray) - return accessPolicyXML, nil - } - - return "", nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go new file mode 100644 index 00000000..e5911ac8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go @@ -0,0 +1,92 @@ +package storage + +import ( + "fmt" + "net/http" + "net/url" +) + +// BlobStorageClient contains operations for Microsoft Azure Blob Storage +// Service. +type BlobStorageClient struct { + client Client + auth authentication +} + +// GetServiceProperties gets the properties of your storage account's blob service. +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-blob-service-properties +func (b *BlobStorageClient) GetServiceProperties() (*ServiceProperties, error) { + return b.client.getServiceProperties(blobServiceName, b.auth) +} + +// SetServiceProperties sets the properties of your storage account's blob service. 
+// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-blob-service-properties +func (b *BlobStorageClient) SetServiceProperties(props ServiceProperties) error { + return b.client.setServiceProperties(props, blobServiceName, b.auth) +} + +// ListContainersParameters defines the set of customizable parameters to make a +// List Containers call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx +type ListContainersParameters struct { + Prefix string + Marker string + Include string + MaxResults uint + Timeout uint +} + +// GetContainerReference returns a Container object for the specified container name. +func (b BlobStorageClient) GetContainerReference(name string) Container { + return Container{ + bsc: &b, + Name: name, + } +} + +// ListContainers returns the list of containers in a storage account along with +// pagination token and other response details. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx +func (b BlobStorageClient) ListContainers(params ListContainersParameters) (*ContainerListResponse, error) { + q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}}) + uri := b.client.getEndpoint(blobServiceName, "", q) + headers := b.client.getStandardHeaders() + + var out ContainerListResponse + resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth) + if err != nil { + return nil, err + } + defer resp.body.Close() + err = xmlUnmarshal(resp.body, &out) + + // assign our client to the newly created Container objects + for i := range out.Containers { + out.Containers[i].bsc = &b + } + return &out, err +} + +func (p ListContainersParameters) getParameters() url.Values { + out := url.Values{} + + if p.Prefix != "" { + out.Set("prefix", p.Prefix) + } + if p.Marker != "" { + out.Set("marker", p.Marker) + } + if p.Include != "" { + out.Set("include", p.Include) + } + if p.MaxResults != 0 { + out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults)) + } + if p.Timeout != 0 { + out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) + } + + return out +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go index ad5affa4..e42082d4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go @@ -12,20 +12,21 @@ import ( "io/ioutil" "net/http" "net/url" - "regexp" - "sort" + "runtime" "strconv" "strings" + + "github.com/Azure/go-autorest/autorest/azure" ) const ( - // DefaultBaseURL is the domain name used for storage requests when a - // default client is created. + // DefaultBaseURL is the domain name used for storage requests in the + // public cloud when a default client is created. DefaultBaseURL = "core.windows.net" - // DefaultAPIVersion is the Azure Storage API version string used when a + // DefaultAPIVersion is the Azure Storage API version string used when a // basic client is created. - DefaultAPIVersion = "2015-02-21" + DefaultAPIVersion = "2015-04-05" defaultUseHTTPS = true @@ -43,6 +44,8 @@ const ( storageEmulatorBlob = "127.0.0.1:10000" storageEmulatorTable = "127.0.0.1:10002" storageEmulatorQueue = "127.0.0.1:10001" + + userAgentHeader = "User-Agent" ) // Client is the object that needs to be constructed to perform @@ -52,11 +55,13 @@ type Client struct { // requests. If it is nil, http.DefaultClient is used. 
HTTPClient *http.Client - accountName string - accountKey []byte - useHTTPS bool - baseURL string - apiVersion string + accountName string + accountKey []byte + useHTTPS bool + UseSharedKeyLite bool + baseURL string + apiVersion string + userAgent string } type storageResponse struct { @@ -128,7 +133,15 @@ func NewBasicClient(accountName, accountKey string) (Client, error) { return NewEmulatorClient() } return NewClient(accountName, accountKey, DefaultBaseURL, DefaultAPIVersion, defaultUseHTTPS) +} +// NewBasicClientOnSovereignCloud constructs a Client with given storage service name and +// key in the referenced cloud. +func NewBasicClientOnSovereignCloud(accountName, accountKey string, env azure.Environment) (Client, error) { + if accountName == StorageEmulatorAccountName { + return NewEmulatorClient() + } + return NewClient(accountName, accountKey, env.StorageEndpointSuffix, DefaultAPIVersion, defaultUseHTTPS) } //NewEmulatorClient contructs a Client intended to only work with Azure @@ -155,13 +168,46 @@ func NewClient(accountName, accountKey, blobServiceBaseURL, apiVersion string, u return c, fmt.Errorf("azure: malformed storage account key: %v", err) } - return Client{ - accountName: accountName, - accountKey: key, - useHTTPS: useHTTPS, - baseURL: blobServiceBaseURL, - apiVersion: apiVersion, - }, nil + c = Client{ + accountName: accountName, + accountKey: key, + useHTTPS: useHTTPS, + baseURL: blobServiceBaseURL, + apiVersion: apiVersion, + UseSharedKeyLite: false, + } + c.userAgent = c.getDefaultUserAgent() + return c, nil +} + +func (c Client) getDefaultUserAgent() string { + return fmt.Sprintf("Go/%s (%s-%s) Azure-SDK-For-Go/%s storage-dataplane/%s", + runtime.Version(), + runtime.GOARCH, + runtime.GOOS, + sdkVersion, + c.apiVersion, + ) +} + +// AddToUserAgent adds an extension to the current user agent +func (c *Client) AddToUserAgent(extension string) error { + if extension != "" { + c.userAgent = fmt.Sprintf("%s %s", c.userAgent, extension) + return nil + } + return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.userAgent) +} + +// protectUserAgent is used in funcs that include extraheaders as a parameter. +// It prevents the User-Agent header to be overwritten, instead if it happens to +// be present, it gets added to the current User-Agent. Use it before getStandardHeaders +func (c *Client) protectUserAgent(extraheaders map[string]string) map[string]string { + if v, ok := extraheaders[userAgentHeader]; ok { + c.AddToUserAgent(v) + delete(extraheaders, userAgentHeader) + } + return extraheaders } func (c Client) getBaseURL(service string) string { @@ -213,181 +259,69 @@ func (c Client) getEndpoint(service, path string, params url.Values) string { // GetBlobService returns a BlobStorageClient which can operate on the blob // service of the storage account. func (c Client) GetBlobService() BlobStorageClient { - return BlobStorageClient{c} + b := BlobStorageClient{ + client: c, + } + b.client.AddToUserAgent(blobServiceName) + b.auth = sharedKey + if c.UseSharedKeyLite { + b.auth = sharedKeyLite + } + return b } // GetQueueService returns a QueueServiceClient which can operate on the queue // service of the storage account. 
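A short sketch of the sovereign-cloud constructor and the user-agent extension added above. azure.ChinaCloud is one of the predefined environments in go-autorest and is used here only as an example; credentials are placeholders.

```
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/storage"
	"github.com/Azure/go-autorest/autorest/azure"
)

func main() {
	// The environment supplies the StorageEndpointSuffix that
	// NewBasicClientOnSovereignCloud passes through to NewClient.
	client, err := storage.NewBasicClientOnSovereignCloud(
		"myaccount", "bXlrZXk=", azure.ChinaCloud)
	if err != nil {
		log.Fatal(err)
	}

	// The extension is appended to the default agent string built by
	// getDefaultUserAgent; an empty extension is rejected.
	if err := client.AddToUserAgent("my-app/1.0"); err != nil {
		log.Fatal(err)
	}
}
```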
func (c Client) GetQueueService() QueueServiceClient { - return QueueServiceClient{c} + q := QueueServiceClient{ + client: c, + } + q.client.AddToUserAgent(queueServiceName) + q.auth = sharedKey + if c.UseSharedKeyLite { + q.auth = sharedKeyLite + } + return q } // GetTableService returns a TableServiceClient which can operate on the table // service of the storage account. func (c Client) GetTableService() TableServiceClient { - return TableServiceClient{c} + t := TableServiceClient{ + client: c, + } + t.client.AddToUserAgent(tableServiceName) + t.auth = sharedKeyForTable + if c.UseSharedKeyLite { + t.auth = sharedKeyLiteForTable + } + return t } // GetFileService returns a FileServiceClient which can operate on the file // service of the storage account. func (c Client) GetFileService() FileServiceClient { - return FileServiceClient{c} -} - -func (c Client) createAuthorizationHeader(canonicalizedString string) string { - signature := c.computeHmac256(canonicalizedString) - return fmt.Sprintf("%s %s:%s", "SharedKey", c.getCanonicalizedAccountName(), signature) -} - -func (c Client) getAuthorizationHeader(verb, url string, headers map[string]string) (string, error) { - canonicalizedResource, err := c.buildCanonicalizedResource(url) - if err != nil { - return "", err + f := FileServiceClient{ + client: c, } - - canonicalizedString := c.buildCanonicalizedString(verb, headers, canonicalizedResource) - return c.createAuthorizationHeader(canonicalizedString), nil + f.client.AddToUserAgent(fileServiceName) + f.auth = sharedKey + if c.UseSharedKeyLite { + f.auth = sharedKeyLite + } + return f } func (c Client) getStandardHeaders() map[string]string { return map[string]string{ - "x-ms-version": c.apiVersion, - "x-ms-date": currentTimeRfc1123Formatted(), + userAgentHeader: c.userAgent, + "x-ms-version": c.apiVersion, + "x-ms-date": currentTimeRfc1123Formatted(), } } -func (c Client) getCanonicalizedAccountName() string { - // since we may be trying to access a secondary storage account, we need to - // remove the -secondary part of the storage name - return strings.TrimSuffix(c.accountName, "-secondary") -} - -func (c Client) buildCanonicalizedHeader(headers map[string]string) string { - cm := make(map[string]string) - - for k, v := range headers { - headerName := strings.TrimSpace(strings.ToLower(k)) - match, _ := regexp.MatchString("x-ms-", headerName) - if match { - cm[headerName] = v - } - } - - if len(cm) == 0 { - return "" - } - - keys := make([]string, 0, len(cm)) - for key := range cm { - keys = append(keys, key) - } - - sort.Strings(keys) - - ch := "" - - for i, key := range keys { - if i == len(keys)-1 { - ch += fmt.Sprintf("%s:%s", key, cm[key]) - } else { - ch += fmt.Sprintf("%s:%s\n", key, cm[key]) - } - } - return ch -} - -func (c Client) buildCanonicalizedResourceTable(uri string) (string, error) { - errMsg := "buildCanonicalizedResourceTable error: %s" - u, err := url.Parse(uri) - if err != nil { - return "", fmt.Errorf(errMsg, err.Error()) - } - - cr := "/" + c.getCanonicalizedAccountName() - - if len(u.Path) > 0 { - cr += u.EscapedPath() - } - - return cr, nil -} - -func (c Client) buildCanonicalizedResource(uri string) (string, error) { - errMsg := "buildCanonicalizedResource error: %s" - u, err := url.Parse(uri) - if err != nil { - return "", fmt.Errorf(errMsg, err.Error()) - } - - cr := "/" + c.getCanonicalizedAccountName() - - if len(u.Path) > 0 { - // Any portion of the CanonicalizedResource string that is derived from - // the resource's URI should be encoded exactly as it 
is in the URI. - // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx - cr += u.EscapedPath() - } - - params, err := url.ParseQuery(u.RawQuery) - if err != nil { - return "", fmt.Errorf(errMsg, err.Error()) - } - - if len(params) > 0 { - cr += "\n" - keys := make([]string, 0, len(params)) - for key := range params { - keys = append(keys, key) - } - - sort.Strings(keys) - - for i, key := range keys { - if len(params[key]) > 1 { - sort.Strings(params[key]) - } - - if i == len(keys)-1 { - cr += fmt.Sprintf("%s:%s", key, strings.Join(params[key], ",")) - } else { - cr += fmt.Sprintf("%s:%s\n", key, strings.Join(params[key], ",")) - } - } - } - - return cr, nil -} - -func (c Client) buildCanonicalizedString(verb string, headers map[string]string, canonicalizedResource string) string { - contentLength := headers["Content-Length"] - if contentLength == "0" { - contentLength = "" - } - canonicalizedString := fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", - verb, - headers["Content-Encoding"], - headers["Content-Language"], - contentLength, - headers["Content-MD5"], - headers["Content-Type"], - headers["Date"], - headers["If-Modified-Since"], - headers["If-Match"], - headers["If-None-Match"], - headers["If-Unmodified-Since"], - headers["Range"], - c.buildCanonicalizedHeader(headers), - canonicalizedResource) - - return canonicalizedString -} - -func (c Client) exec(verb, url string, headers map[string]string, body io.Reader) (*storageResponse, error) { - authHeader, err := c.getAuthorizationHeader(verb, url, headers) - if err != nil { - return nil, err - } - headers["Authorization"] = authHeader +func (c Client) exec(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*storageResponse, error) { + headers, err := c.addAuthorizationHeader(verb, url, headers, auth) if err != nil { return nil, err } @@ -423,7 +357,7 @@ func (c Client) exec(verb, url string, headers map[string]string, body io.Reader statusCode := resp.StatusCode if statusCode >= 400 && statusCode <= 505 { var respBody []byte - respBody, err = readResponseBody(resp) + respBody, err = readAndCloseBody(resp.Body) if err != nil { return nil, err } @@ -453,7 +387,12 @@ func (c Client) exec(verb, url string, headers map[string]string, body io.Reader body: resp.Body}, nil } -func (c Client) execInternalJSON(verb, url string, headers map[string]string, body io.Reader) (*odataResponse, error) { +func (c Client) execInternalJSON(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, error) { + headers, err := c.addAuthorizationHeader(verb, url, headers, auth) + if err != nil { + return nil, err + } + req, err := http.NewRequest(verb, url, body) for k, v := range headers { req.Header.Add(k, v) @@ -477,7 +416,7 @@ func (c Client) execInternalJSON(verb, url string, headers map[string]string, bo statusCode := resp.StatusCode if statusCode >= 400 && statusCode <= 505 { var respBody []byte - respBody, err = readResponseBody(resp) + respBody, err = readAndCloseBody(resp.Body) if err != nil { return nil, err } @@ -495,31 +434,9 @@ func (c Client) execInternalJSON(verb, url string, headers map[string]string, bo return respToRet, nil } -func (c Client) createSharedKeyLite(url string, headers map[string]string) (string, error) { - can, err := c.buildCanonicalizedResourceTable(url) - - if err != nil { - return "", err - } - strToSign := headers["x-ms-date"] + "\n" + can - - hmac := c.computeHmac256(strToSign) - return fmt.Sprintf("SharedKeyLite 
%s:%s", c.accountName, hmac), nil -} - -func (c Client) execTable(verb, url string, headers map[string]string, body io.Reader) (*odataResponse, error) { - var err error - headers["Authorization"], err = c.createSharedKeyLite(url, headers) - if err != nil { - return nil, err - } - - return c.execInternalJSON(verb, url, headers, body) -} - -func readResponseBody(resp *http.Response) ([]byte, error) { - defer resp.Body.Close() - out, err := ioutil.ReadAll(resp.Body) +func readAndCloseBody(body io.ReadCloser) ([]byte, error) { + defer body.Close() + out, err := ioutil.ReadAll(body) if err == io.EOF { err = nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go new file mode 100644 index 00000000..f0642396 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go @@ -0,0 +1,376 @@ +package storage + +import ( + "encoding/xml" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "time" +) + +// Container represents an Azure container. +type Container struct { + bsc *BlobStorageClient + Name string `xml:"Name"` + Properties ContainerProperties `xml:"Properties"` +} + +func (c *Container) buildPath() string { + return fmt.Sprintf("/%s", c.Name) +} + +// ContainerProperties contains various properties of a container returned from +// various endpoints like ListContainers. +type ContainerProperties struct { + LastModified string `xml:"Last-Modified"` + Etag string `xml:"Etag"` + LeaseStatus string `xml:"LeaseStatus"` + LeaseState string `xml:"LeaseState"` + LeaseDuration string `xml:"LeaseDuration"` +} + +// ContainerListResponse contains the response fields from +// ListContainers call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx +type ContainerListResponse struct { + XMLName xml.Name `xml:"EnumerationResults"` + Xmlns string `xml:"xmlns,attr"` + Prefix string `xml:"Prefix"` + Marker string `xml:"Marker"` + NextMarker string `xml:"NextMarker"` + MaxResults int64 `xml:"MaxResults"` + Containers []Container `xml:"Containers>Container"` +} + +// BlobListResponse contains the response fields from ListBlobs call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx +type BlobListResponse struct { + XMLName xml.Name `xml:"EnumerationResults"` + Xmlns string `xml:"xmlns,attr"` + Prefix string `xml:"Prefix"` + Marker string `xml:"Marker"` + NextMarker string `xml:"NextMarker"` + MaxResults int64 `xml:"MaxResults"` + Blobs []Blob `xml:"Blobs>Blob"` + + // BlobPrefix is used to traverse blobs as if it were a file system. + // It is returned if ListBlobsParameters.Delimiter is specified. + // The list here can be thought of as "folders" that may contain + // other folders or blobs. + BlobPrefixes []string `xml:"Blobs>BlobPrefix>Name"` + + // Delimiter is used to traverse blobs as if it were a file system. + // It is returned if ListBlobsParameters.Delimiter is specified. + Delimiter string `xml:"Delimiter"` +} + +// ListBlobsParameters defines the set of customizable +// parameters to make a List Blobs call. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx +type ListBlobsParameters struct { + Prefix string + Delimiter string + Marker string + Include string + MaxResults uint + Timeout uint +} + +func (p ListBlobsParameters) getParameters() url.Values { + out := url.Values{} + + if p.Prefix != "" { + out.Set("prefix", p.Prefix) + } + if p.Delimiter != "" { + out.Set("delimiter", p.Delimiter) + } + if p.Marker != "" { + out.Set("marker", p.Marker) + } + if p.Include != "" { + out.Set("include", p.Include) + } + if p.MaxResults != 0 { + out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults)) + } + if p.Timeout != 0 { + out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) + } + + return out +} + +// ContainerAccessType defines the access level to the container from a public +// request. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx and "x-ms- +// blob-public-access" header. +type ContainerAccessType string + +// Access options for containers +const ( + ContainerAccessTypePrivate ContainerAccessType = "" + ContainerAccessTypeBlob ContainerAccessType = "blob" + ContainerAccessTypeContainer ContainerAccessType = "container" +) + +// ContainerAccessPolicy represents each access policy in the container ACL. +type ContainerAccessPolicy struct { + ID string + StartTime time.Time + ExpiryTime time.Time + CanRead bool + CanWrite bool + CanDelete bool +} + +// ContainerPermissions represents the container ACLs. +type ContainerPermissions struct { + AccessType ContainerAccessType + AccessPolicies []ContainerAccessPolicy +} + +// ContainerAccessHeader references header used when setting/getting container ACL +const ( + ContainerAccessHeader string = "x-ms-blob-public-access" +) + +// Create creates a blob container within the storage account +// with given name and access level. Returns error if container already exists. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx +func (c *Container) Create() error { + resp, err := c.create() + if err != nil { + return err + } + defer readAndCloseBody(resp.body) + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// CreateIfNotExists creates a blob container if it does not exist. Returns +// true if container is newly created or false if container already exists. +func (c *Container) CreateIfNotExists() (bool, error) { + resp, err := c.create() + if resp != nil { + defer readAndCloseBody(resp.body) + if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict { + return resp.statusCode == http.StatusCreated, nil + } + } + return false, err +} + +func (c *Container) create() (*storageResponse, error) { + uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), url.Values{"restype": {"container"}}) + headers := c.bsc.client.getStandardHeaders() + return c.bsc.client.exec(http.MethodPut, uri, headers, nil, c.bsc.auth) +} + +// Exists returns true if a container with given name exists +// on the storage account, otherwise returns false. 
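A minimal sketch of the create-if-absent idiom above, with placeholder credentials: CreateIfNotExists maps 201 Created / 409 Conflict onto a bool, so callers never inspect status codes themselves.

```
package main

import (
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	client, err := storage.NewBasicClient("myaccount", "bXlrZXk=")
	if err != nil {
		log.Fatal(err)
	}
	cnt := client.GetBlobService().GetContainerReference("backups")

	// true means this call created the container; false means it already
	// existed (the 409 case is not surfaced as an error).
	created, err := cnt.CreateIfNotExists()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("newly created:", created)
}
```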
+func (c *Container) Exists() (bool, error) { + uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), url.Values{"restype": {"container"}}) + headers := c.bsc.client.getStandardHeaders() + + resp, err := c.bsc.client.exec(http.MethodHead, uri, headers, nil, c.bsc.auth) + if resp != nil { + defer readAndCloseBody(resp.body) + if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusOK, nil + } + } + return false, err +} + +// SetPermissions sets up container permissions as per https://msdn.microsoft.com/en-us/library/azure/dd179391.aspx +func (c *Container) SetPermissions(permissions ContainerPermissions, timeout int, leaseID string) error { + params := url.Values{ + "restype": {"container"}, + "comp": {"acl"}, + } + + if timeout > 0 { + params.Add("timeout", strconv.Itoa(timeout)) + } + + uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params) + headers := c.bsc.client.getStandardHeaders() + if permissions.AccessType != "" { + headers[ContainerAccessHeader] = string(permissions.AccessType) + } + + if leaseID != "" { + headers[headerLeaseID] = leaseID + } + + body, length, err := generateContainerACLpayload(permissions.AccessPolicies) + headers["Content-Length"] = strconv.Itoa(length) + + resp, err := c.bsc.client.exec(http.MethodPut, uri, headers, body, c.bsc.auth) + if err != nil { + return err + } + defer readAndCloseBody(resp.body) + + if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return errors.New("Unable to set permissions") + } + + return nil +} + +// GetPermissions gets the container permissions as per https://msdn.microsoft.com/en-us/library/azure/dd179469.aspx +// If timeout is 0 then it will not be passed to Azure +// leaseID will only be passed to Azure if populated +func (c *Container) GetPermissions(timeout int, leaseID string) (*ContainerPermissions, error) { + params := url.Values{ + "restype": {"container"}, + "comp": {"acl"}, + } + + if timeout > 0 { + params.Add("timeout", strconv.Itoa(timeout)) + } + + uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params) + headers := c.bsc.client.getStandardHeaders() + + if leaseID != "" { + headers[headerLeaseID] = leaseID + } + + resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth) + if err != nil { + return nil, err + } + defer resp.body.Close() + + var ap AccessPolicy + err = xmlUnmarshal(resp.body, &ap.SignedIdentifiersList) + if err != nil { + return nil, err + } + return buildAccessPolicy(ap, &resp.headers), nil +} + +func buildAccessPolicy(ap AccessPolicy, headers *http.Header) *ContainerPermissions { + // containerAccess. Blob, Container, empty + containerAccess := headers.Get(http.CanonicalHeaderKey(ContainerAccessHeader)) + permissions := ContainerPermissions{ + AccessType: ContainerAccessType(containerAccess), + AccessPolicies: []ContainerAccessPolicy{}, + } + + for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers { + capd := ContainerAccessPolicy{ + ID: policy.ID, + StartTime: policy.AccessPolicy.StartTime, + ExpiryTime: policy.AccessPolicy.ExpiryTime, + } + capd.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r") + capd.CanWrite = updatePermissions(policy.AccessPolicy.Permission, "w") + capd.CanDelete = updatePermissions(policy.AccessPolicy.Permission, "d") + + permissions.AccessPolicies = append(permissions.AccessPolicies, capd) + } + return &permissions +} + +// Delete deletes the container with given name on the storage +// account. 
If the container does not exist returns error. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx +func (c *Container) Delete() error { + resp, err := c.delete() + if err != nil { + return err + } + defer readAndCloseBody(resp.body) + return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) +} + +// DeleteIfExists deletes the container with given name on the storage +// account if it exists. Returns true if container is deleted with this call, or +// false if the container did not exist at the time of the Delete Container +// operation. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx +func (c *Container) DeleteIfExists() (bool, error) { + resp, err := c.delete() + if resp != nil { + defer readAndCloseBody(resp.body) + if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusAccepted, nil + } + } + return false, err +} + +func (c *Container) delete() (*storageResponse, error) { + uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), url.Values{"restype": {"container"}}) + headers := c.bsc.client.getStandardHeaders() + return c.bsc.client.exec(http.MethodDelete, uri, headers, nil, c.bsc.auth) +} + +// ListBlobs returns an object that contains list of blobs in the container, +// pagination token and other information in the response of List Blobs call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx +func (c *Container) ListBlobs(params ListBlobsParameters) (BlobListResponse, error) { + q := mergeParams(params.getParameters(), url.Values{ + "restype": {"container"}, + "comp": {"list"}}, + ) + uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q) + headers := c.bsc.client.getStandardHeaders() + + var out BlobListResponse + resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth) + if err != nil { + return out, err + } + defer resp.body.Close() + + err = xmlUnmarshal(resp.body, &out) + return out, err +} + +func generateContainerACLpayload(policies []ContainerAccessPolicy) (io.Reader, int, error) { + sil := SignedIdentifiers{ + SignedIdentifiers: []SignedIdentifier{}, + } + for _, capd := range policies { + permission := capd.generateContainerPermissions() + signedIdentifier := convertAccessPolicyToXMLStructs(capd.ID, capd.StartTime, capd.ExpiryTime, permission) + sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier) + } + return xmlMarshal(sil) +} + +func (capd *ContainerAccessPolicy) generateContainerPermissions() (permissions string) { + // generate the permissions string (rwd). + // still want the end user API to have bool flags. + permissions = "" + + if capd.CanRead { + permissions += "r" + } + + if capd.CanWrite { + permissions += "w" + } + + if capd.CanDelete { + permissions += "d" + } + + return permissions +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go new file mode 100644 index 00000000..d27e6207 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go @@ -0,0 +1,217 @@ +package storage + +import ( + "encoding/xml" + "net/http" + "net/url" +) + +// Directory represents a directory on a share. +type Directory struct { + fsc *FileServiceClient + Metadata map[string]string + Name string `xml:"Name"` + parent *Directory + Properties DirectoryProperties + share *Share +} + +// DirectoryProperties contains various properties of a directory. 
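A sketch of setting a container ACL with the new types shown above, assuming placeholder credentials: one stored access policy whose CanRead/CanWrite/CanDelete flags generateContainerPermissions folds into the "rwd" permission string.

```
package main

import (
	"log"
	"time"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	client, err := storage.NewBasicClient("myaccount", "bXlrZXk=")
	if err != nil {
		log.Fatal(err)
	}
	cnt := client.GetBlobService().GetContainerReference("public-reads")

	// Blob-level public access plus one read-only policy valid for a day.
	perms := storage.ContainerPermissions{
		AccessType: storage.ContainerAccessTypeBlob,
		AccessPolicies: []storage.ContainerAccessPolicy{{
			ID:         "read-only-1d",
			StartTime:  time.Now().UTC(),
			ExpiryTime: time.Now().UTC().Add(24 * time.Hour),
			CanRead:    true,
		}},
	}
	// A zero timeout and empty lease ID are simply omitted from the request.
	if err := cnt.SetPermissions(perms, 0, ""); err != nil {
		log.Fatal(err)
	}
}
```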
+type DirectoryProperties struct { + LastModified string `xml:"Last-Modified"` + Etag string `xml:"Etag"` +} + +// ListDirsAndFilesParameters defines the set of customizable parameters to +// make a List Files and Directories call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn166980.aspx +type ListDirsAndFilesParameters struct { + Marker string + MaxResults uint + Timeout uint +} + +// DirsAndFilesListResponse contains the response fields from +// a List Files and Directories call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn166980.aspx +type DirsAndFilesListResponse struct { + XMLName xml.Name `xml:"EnumerationResults"` + Xmlns string `xml:"xmlns,attr"` + Marker string `xml:"Marker"` + MaxResults int64 `xml:"MaxResults"` + Directories []Directory `xml:"Entries>Directory"` + Files []File `xml:"Entries>File"` + NextMarker string `xml:"NextMarker"` +} + +// builds the complete directory path for this directory object. +func (d *Directory) buildPath() string { + path := "" + current := d + for current.Name != "" { + path = "/" + current.Name + path + current = current.parent + } + return d.share.buildPath() + path +} + +// Create this directory in the associated share. +// If a directory with the same name already exists, the operation fails. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn166993.aspx +func (d *Directory) Create() error { + // if this is the root directory exit early + if d.parent == nil { + return nil + } + + headers, err := d.fsc.createResource(d.buildPath(), resourceDirectory, nil, mergeMDIntoExtraHeaders(d.Metadata, nil), []int{http.StatusCreated}) + if err != nil { + return err + } + + d.updateEtagAndLastModified(headers) + return nil +} + +// CreateIfNotExists creates this directory under the associated share if the +// directory does not exists. Returns true if the directory is newly created or +// false if the directory already exists. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn166993.aspx +func (d *Directory) CreateIfNotExists() (bool, error) { + // if this is the root directory exit early + if d.parent == nil { + return false, nil + } + + resp, err := d.fsc.createResourceNoClose(d.buildPath(), resourceDirectory, nil, nil) + if resp != nil { + defer readAndCloseBody(resp.body) + if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict { + if resp.statusCode == http.StatusCreated { + d.updateEtagAndLastModified(resp.headers) + return true, nil + } + + return false, d.FetchAttributes() + } + } + + return false, err +} + +// Delete removes this directory. It must be empty in order to be deleted. +// If the directory does not exist the operation fails. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn166969.aspx +func (d *Directory) Delete() error { + return d.fsc.deleteResource(d.buildPath(), resourceDirectory) +} + +// DeleteIfExists removes this directory if it exists. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn166969.aspx +func (d *Directory) DeleteIfExists() (bool, error) { + resp, err := d.fsc.deleteResourceNoClose(d.buildPath(), resourceDirectory) + if resp != nil { + defer readAndCloseBody(resp.body) + if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusAccepted, nil + } + } + return false, err +} + +// Exists returns true if this directory exists. 
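To make the upward walk in Directory.buildPath concrete, a standalone mirror of the algorithm under a stand-in type (node and the hard-coded share path are illustrative only): the loop prepends each segment until it reaches the nameless root.

```
package main

import "fmt"

// node stands in for Directory's parent chain.
type node struct {
	name   string
	parent *node
}

func buildPath(d *node) string {
	path := ""
	for cur := d; cur.name != ""; cur = cur.parent {
		path = "/" + cur.name + path
	}
	return "/myshare" + path // the real code prepends share.buildPath()
}

func main() {
	root := &node{}
	logs := &node{name: "logs", parent: root}
	daily := &node{name: "daily", parent: logs}
	fmt.Println(buildPath(daily)) // prints "/myshare/logs/daily"
}
```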
+func (d *Directory) Exists() (bool, error) { + exists, headers, err := d.fsc.resourceExists(d.buildPath(), resourceDirectory) + if exists { + d.updateEtagAndLastModified(headers) + } + return exists, err +} + +// FetchAttributes retrieves metadata for this directory. +func (d *Directory) FetchAttributes() error { + headers, err := d.fsc.getResourceHeaders(d.buildPath(), compNone, resourceDirectory, http.MethodHead) + if err != nil { + return err + } + + d.updateEtagAndLastModified(headers) + d.Metadata = getMetadataFromHeaders(headers) + + return nil +} + +// GetDirectoryReference returns a child Directory object for this directory. +func (d *Directory) GetDirectoryReference(name string) *Directory { + return &Directory{ + fsc: d.fsc, + Name: name, + parent: d, + share: d.share, + } +} + +// GetFileReference returns a child File object for this directory. +func (d *Directory) GetFileReference(name string) *File { + return &File{ + fsc: d.fsc, + Name: name, + parent: d, + share: d.share, + } +} + +// ListDirsAndFiles returns a list of files and directories under this directory. +// It also contains a pagination token and other response details. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn166980.aspx +func (d *Directory) ListDirsAndFiles(params ListDirsAndFilesParameters) (*DirsAndFilesListResponse, error) { + q := mergeParams(params.getParameters(), getURLInitValues(compList, resourceDirectory)) + + resp, err := d.fsc.listContent(d.buildPath(), q, nil) + if err != nil { + return nil, err + } + + defer resp.body.Close() + var out DirsAndFilesListResponse + err = xmlUnmarshal(resp.body, &out) + return &out, err +} + +// SetMetadata replaces the metadata for this directory. +// +// Some keys may be converted to Camel-Case before sending. All keys +// are returned in lower case by GetDirectoryMetadata. HTTP header names +// are case-insensitive so case munging should not matter to other +// applications either. +// +// See https://msdn.microsoft.com/en-us/library/azure/mt427370.aspx +func (d *Directory) SetMetadata() error { + headers, err := d.fsc.setResourceHeaders(d.buildPath(), compMetadata, resourceDirectory, mergeMDIntoExtraHeaders(d.Metadata, nil)) + if err != nil { + return err + } + + d.updateEtagAndLastModified(headers) + return nil +} + +// updates Etag and last modified date +func (d *Directory) updateEtagAndLastModified(headers http.Header) { + d.Properties.Etag = headers.Get("Etag") + d.Properties.LastModified = headers.Get("Last-Modified") +} + +// URL gets the canonical URL to this directory. +// This method does not create a publicly accessible URL if the directory +// is private and this method does not check if the directory exists. +func (d *Directory) URL() string { + return d.fsc.client.getEndpoint(fileServiceName, d.buildPath(), url.Values{}) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go index f679395b..e4901a11 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go @@ -1,118 +1,74 @@ package storage import ( - "encoding/xml" "errors" "fmt" "io" + "io/ioutil" "net/http" "net/url" "strconv" - "strings" ) -// FileServiceClient contains operations for Microsoft Azure File Service. -type FileServiceClient struct { - client Client -} +const fourMB = uint64(4194304) +const oneTB = uint64(1099511627776) -// A Share is an entry in ShareListResponse. 
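A sketch of paging one level of a directory listing, assuming dir is a *storage.Directory obtained elsewhere (for example from a share's root directory, which this hunk does not show): as with containers, NextMarker is fed back in as Marker.

```
package main

import (
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/storage"
)

// walk lists the immediate children of dir, one page at a time.
func walk(dir *storage.Directory) {
	params := storage.ListDirsAndFilesParameters{MaxResults: 100}
	for {
		resp, err := dir.ListDirsAndFiles(params)
		if err != nil {
			log.Fatal(err)
		}
		for _, d := range resp.Directories {
			fmt.Println("dir: ", d.Name)
		}
		for _, f := range resp.Files {
			fmt.Println("file:", f.Name)
		}
		if resp.NextMarker == "" {
			return
		}
		params.Marker = resp.NextMarker
	}
}

func main() {} // obtaining the *Directory is elided; see lead-in
```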
-type Share struct { - Name string `xml:"Name"` - Properties ShareProperties `xml:"Properties"` -} - -// A Directory is an entry in DirsAndFilesListResponse. -type Directory struct { - Name string `xml:"Name"` -} - -// A File is an entry in DirsAndFilesListResponse. +// File represents a file on a share. type File struct { - Name string `xml:"Name"` - Properties FileProperties `xml:"Properties"` + fsc *FileServiceClient + Metadata map[string]string + Name string `xml:"Name"` + parent *Directory + Properties FileProperties `xml:"Properties"` + share *Share + FileCopyProperties FileCopyState } -// ShareProperties contains various properties of a share returned from -// various endpoints like ListShares. -type ShareProperties struct { - LastModified string `xml:"Last-Modified"` - Etag string `xml:"Etag"` - Quota string `xml:"Quota"` -} - -// DirectoryProperties contains various properties of a directory returned -// from various endpoints like GetDirectoryProperties. -type DirectoryProperties struct { - LastModified string `xml:"Last-Modified"` - Etag string `xml:"Etag"` -} - -// FileProperties contains various properties of a file returned from -// various endpoints like ListDirsAndFiles. +// FileProperties contains various properties of a file. type FileProperties struct { - CacheControl string `header:"x-ms-cache-control"` - ContentLength uint64 `xml:"Content-Length"` - ContentType string `header:"x-ms-content-type"` - CopyCompletionTime string - CopyID string - CopySource string - CopyProgress string - CopyStatusDesc string - CopyStatus string - Disposition string `header:"x-ms-content-disposition"` - Encoding string `header:"x-ms-content-encoding"` - Etag string - Language string `header:"x-ms-content-language"` - LastModified string - MD5 string `header:"x-ms-content-md5"` + CacheControl string `header:"x-ms-cache-control"` + Disposition string `header:"x-ms-content-disposition"` + Encoding string `header:"x-ms-content-encoding"` + Etag string + Language string `header:"x-ms-content-language"` + LastModified string + Length uint64 `xml:"Content-Length"` + MD5 string `header:"x-ms-content-md5"` + Type string `header:"x-ms-content-type"` +} + +// FileCopyState contains various properties of a file copy operation. +type FileCopyState struct { + CompletionTime string + ID string `header:"x-ms-copy-id"` + Progress string + Source string + Status string `header:"x-ms-copy-status"` + StatusDesc string } // FileStream contains file data returned from a call to GetFile. type FileStream struct { Body io.ReadCloser - Properties *FileProperties - Metadata map[string]string + ContentMD5 string } -// ShareListResponse contains the response fields from -// ListShares call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn167009.aspx -type ShareListResponse struct { - XMLName xml.Name `xml:"EnumerationResults"` - Xmlns string `xml:"xmlns,attr"` - Prefix string `xml:"Prefix"` - Marker string `xml:"Marker"` - NextMarker string `xml:"NextMarker"` - MaxResults int64 `xml:"MaxResults"` - Shares []Share `xml:"Shares>Share"` +// FileRequestOptions will be passed to misc file operations. +// Currently just Timeout (in seconds) but will expand. +type FileRequestOptions struct { + Timeout uint // timeout duration in seconds. } -// ListSharesParameters defines the set of customizable parameters to make a -// List Shares call. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dn167009.aspx -type ListSharesParameters struct { - Prefix string - Marker string - Include string - MaxResults uint - Timeout uint -} +// getParameters, construct parameters for FileRequestOptions. +// currently only timeout, but expecting to grow as functionality fills out. +func (p FileRequestOptions) getParameters() url.Values { + out := url.Values{} -// DirsAndFilesListResponse contains the response fields from -// a List Files and Directories call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn166980.aspx -type DirsAndFilesListResponse struct { - XMLName xml.Name `xml:"EnumerationResults"` - Xmlns string `xml:"xmlns,attr"` - Marker string `xml:"Marker"` - MaxResults int64 `xml:"MaxResults"` - Directories []Directory `xml:"Entries>Directory"` - Files []File `xml:"Entries>File"` - NextMarker string `xml:"NextMarker"` + if p.Timeout != 0 { + out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) + } + + return out } // FileRanges contains a list of file range information for a file. @@ -133,133 +89,163 @@ type FileRange struct { End uint64 `xml:"End"` } -// ListDirsAndFilesParameters defines the set of customizable parameters to -// make a List Files and Directories call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn166980.aspx -type ListDirsAndFilesParameters struct { - Marker string - MaxResults uint - Timeout uint -} - -// ShareHeaders contains various properties of a file and is an entry -// in SetShareProperties -type ShareHeaders struct { - Quota string `header:"x-ms-share-quota"` -} - -type compType string - -const ( - compNone compType = "" - compList compType = "list" - compMetadata compType = "metadata" - compProperties compType = "properties" - compRangeList compType = "rangelist" -) - -func (ct compType) String() string { - return string(ct) -} - -type resourceType string - -const ( - resourceDirectory resourceType = "directory" - resourceFile resourceType = "" - resourceShare resourceType = "share" -) - -func (rt resourceType) String() string { - return string(rt) -} - -func (p ListSharesParameters) getParameters() url.Values { - out := url.Values{} - - if p.Prefix != "" { - out.Set("prefix", p.Prefix) - } - if p.Marker != "" { - out.Set("marker", p.Marker) - } - if p.Include != "" { - out.Set("include", p.Include) - } - if p.MaxResults != 0 { - out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults)) - } - if p.Timeout != 0 { - out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) - } - - return out -} - -func (p ListDirsAndFilesParameters) getParameters() url.Values { - out := url.Values{} - - if p.Marker != "" { - out.Set("marker", p.Marker) - } - if p.MaxResults != 0 { - out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults)) - } - if p.Timeout != 0 { - out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) - } - - return out -} - func (fr FileRange) String() string { return fmt.Sprintf("bytes=%d-%d", fr.Start, fr.End) } -// ToPathSegment returns the URL path segment for the specified values -func ToPathSegment(parts ...string) string { - join := strings.Join(parts, "/") - if join[0] != '/' { - join = fmt.Sprintf("/%s", join) - } - return join +// builds the complete file path for this file object +func (f *File) buildPath() string { + return f.parent.buildPath() + "/" + f.Name } -// returns url.Values for the specified types -func getURLInitValues(comp compType, res resourceType) url.Values { - values := url.Values{} - if comp != compNone { - values.Set("comp", comp.String()) - } - if res != 
resourceFile { - values.Set("restype", res.String()) - } - return values -} - -// ListDirsAndFiles returns a list of files or directories under the specified share or -// directory. It also contains a pagination token and other response details. +// ClearRange releases the specified range of space in a file. // -// See https://msdn.microsoft.com/en-us/library/azure/dn166980.aspx -func (f FileServiceClient) ListDirsAndFiles(path string, params ListDirsAndFilesParameters) (DirsAndFilesListResponse, error) { - q := mergeParams(params.getParameters(), getURLInitValues(compList, resourceDirectory)) - - var out DirsAndFilesListResponse - resp, err := f.listContent(path, q, nil) +// See https://msdn.microsoft.com/en-us/library/azure/dn194276.aspx +func (f *File) ClearRange(fileRange FileRange) error { + headers, err := f.modifyRange(nil, fileRange, nil) if err != nil { - return out, err + return err } - defer resp.body.Close() - err = xmlUnmarshal(resp.body, &out) - return out, err + f.updateEtagAndLastModified(headers) + return nil } -// ListFileRanges returns the list of valid ranges for a file. +// Create creates a new file or replaces an existing one. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn194271.aspx +func (f *File) Create(maxSize uint64) error { + if maxSize > oneTB { + return fmt.Errorf("max file size is 1TB") + } + + extraHeaders := map[string]string{ + "x-ms-content-length": strconv.FormatUint(maxSize, 10), + "x-ms-type": "file", + } + + headers, err := f.fsc.createResource(f.buildPath(), resourceFile, nil, mergeMDIntoExtraHeaders(f.Metadata, extraHeaders), []int{http.StatusCreated}) + if err != nil { + return err + } + + f.Properties.Length = maxSize + f.updateEtagAndLastModified(headers) + return nil +} + +// CopyFile operation copied a file/blob from the sourceURL to the path provided. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/copy-file +func (f *File) CopyFile(sourceURL string, options *FileRequestOptions) error { + extraHeaders := map[string]string{ + "x-ms-type": "file", + "x-ms-copy-source": sourceURL, + } + + var parameters url.Values + if options != nil { + parameters = options.getParameters() + } + + headers, err := f.fsc.createResource(f.buildPath(), resourceFile, parameters, mergeMDIntoExtraHeaders(f.Metadata, extraHeaders), []int{http.StatusAccepted}) + if err != nil { + return err + } + + f.updateEtagLastModifiedAndCopyHeaders(headers) + return nil +} + +// Delete immediately removes this file from the storage account. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn689085.aspx +func (f *File) Delete() error { + return f.fsc.deleteResource(f.buildPath(), resourceFile) +} + +// DeleteIfExists removes this file if it exists. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn689085.aspx +func (f *File) DeleteIfExists() (bool, error) { + resp, err := f.fsc.deleteResourceNoClose(f.buildPath(), resourceFile) + if resp != nil { + defer readAndCloseBody(resp.body) + if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusAccepted, nil + } + } + return false, err +} + +// DownloadRangeToStream operation downloads the specified range of this file with optional MD5 hash. 
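A sketch of the two ways a file gets its content under the new object-style API, assuming dir is a *storage.Directory obtained elsewhere and the source URL is a placeholder: Create only reserves the byte range (capped at 1TB) for later ranged writes, while CopyFile populates a file server-side in one call.

```
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func example(dir *storage.Directory) {
	// Reserve 1 MiB; content is written afterwards in ranges of <= 4MB.
	f := dir.GetFileReference("payload.bin")
	if err := f.Create(1 << 20); err != nil {
		log.Fatal(err)
	}

	// Alternatively, a server-side copy creates and fills a file at once.
	g := dir.GetFileReference("copy.bin")
	opts := &storage.FileRequestOptions{Timeout: 30}
	if err := g.CopyFile("https://src.example.invalid/data.bin", opts); err != nil {
		log.Fatal(err)
	}
}

func main() {} // obtaining the *Directory is elided; see lead-in
```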
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file +func (f *File) DownloadRangeToStream(fileRange FileRange, getContentMD5 bool) (fs FileStream, err error) { + if getContentMD5 && isRangeTooBig(fileRange) { + return fs, fmt.Errorf("must specify a range less than or equal to 4MB when getContentMD5 is true") + } + + extraHeaders := map[string]string{ + "Range": fileRange.String(), + } + if getContentMD5 == true { + extraHeaders["x-ms-range-get-content-md5"] = "true" + } + + resp, err := f.fsc.getResourceNoClose(f.buildPath(), compNone, resourceFile, http.MethodGet, extraHeaders) + if err != nil { + return fs, err + } + + if err = checkRespCode(resp.statusCode, []int{http.StatusOK, http.StatusPartialContent}); err != nil { + resp.body.Close() + return fs, err + } + + fs.Body = resp.body + if getContentMD5 { + fs.ContentMD5 = resp.headers.Get("Content-MD5") + } + return fs, nil +} + +// Exists returns true if this file exists. +func (f *File) Exists() (bool, error) { + exists, headers, err := f.fsc.resourceExists(f.buildPath(), resourceFile) + if exists { + f.updateEtagAndLastModified(headers) + f.updateProperties(headers) + } + return exists, err +} + +// FetchAttributes updates metadata and properties for this file. +func (f *File) FetchAttributes() error { + headers, err := f.fsc.getResourceHeaders(f.buildPath(), compNone, resourceFile, http.MethodHead) + if err != nil { + return err + } + + f.updateEtagAndLastModified(headers) + f.updateProperties(headers) + f.Metadata = getMetadataFromHeaders(headers) + return nil +} + +// returns true if the range is larger than 4MB +func isRangeTooBig(fileRange FileRange) bool { + if fileRange.End-fileRange.Start > fourMB { + return true + } + + return false +} + +// ListRanges returns the list of valid ranges for this file. // // See https://msdn.microsoft.com/en-us/library/azure/dn166984.aspx -func (f FileServiceClient) ListFileRanges(path string, listRange *FileRange) (FileRanges, error) { +func (f *File) ListRanges(listRange *FileRange) (*FileRanges, error) { params := url.Values{"comp": {"rangelist"}} // add optional range to list @@ -269,115 +255,41 @@ func (f FileServiceClient) ListFileRanges(path string, listRange *FileRange) (Fi headers["Range"] = listRange.String() } - var out FileRanges - resp, err := f.listContent(path, params, headers) + resp, err := f.fsc.listContent(f.buildPath(), params, headers) if err != nil { - return out, err + return nil, err } defer resp.body.Close() var cl uint64 cl, err = strconv.ParseUint(resp.headers.Get("x-ms-content-length"), 10, 64) if err != nil { - return out, err + ioutil.ReadAll(resp.body) + return nil, err } + var out FileRanges out.ContentLength = cl out.ETag = resp.headers.Get("ETag") out.LastModified = resp.headers.Get("Last-Modified") err = xmlUnmarshal(resp.body, &out) - return out, err + return &out, err } -// ListShares returns the list of shares in a storage account along with -// pagination token and other response details. 
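A sketch of a verified ranged download, assuming f is a *storage.File obtained elsewhere: Content-MD5 comes back base64-encoded, so the locally computed hash is compared in the same encoding. Per isRangeTooBig above, a range over 4MB cannot request an MD5 at all.

```
package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
	"io/ioutil"

	"github.com/Azure/azure-sdk-for-go/storage"
)

// verifiedRead downloads the first 1KB of f and checks it against the
// Content-MD5 the service returned.
func verifiedRead(f *storage.File) ([]byte, error) {
	fs, err := f.DownloadRangeToStream(storage.FileRange{Start: 0, End: 1023}, true)
	if err != nil {
		return nil, err
	}
	defer fs.Body.Close()

	data, err := ioutil.ReadAll(fs.Body)
	if err != nil {
		return nil, err
	}
	sum := md5.Sum(data)
	if got := base64.StdEncoding.EncodeToString(sum[:]); got != fs.ContentMD5 {
		return nil, fmt.Errorf("md5 mismatch: %s != %s", got, fs.ContentMD5)
	}
	return data, nil
}

func main() {} // obtaining the *File is elided; see lead-in
```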
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx -func (f FileServiceClient) ListShares(params ListSharesParameters) (ShareListResponse, error) { - q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}}) - - var out ShareListResponse - resp, err := f.listContent("", q, nil) - if err != nil { - return out, err - } - - defer resp.body.Close() - err = xmlUnmarshal(resp.body, &out) - return out, err -} - -// retrieves directory or share content -func (f FileServiceClient) listContent(path string, params url.Values, extraHeaders map[string]string) (*storageResponse, error) { - if err := f.checkForStorageEmulator(); err != nil { +// modifies a range of bytes in this file +func (f *File) modifyRange(bytes io.Reader, fileRange FileRange, contentMD5 *string) (http.Header, error) { + if err := f.fsc.checkForStorageEmulator(); err != nil { return nil, err } - - uri := f.client.getEndpoint(fileServiceName, path, params) - headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) - - resp, err := f.client.exec(http.MethodGet, uri, headers, nil) - if err != nil { - return nil, err - } - - if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - resp.body.Close() - return nil, err - } - - return resp, nil -} - -// CreateDirectory operation creates a new directory with optional metadata in the -// specified share. If a directory with the same name already exists, the operation fails. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn166993.aspx -func (f FileServiceClient) CreateDirectory(path string, metadata map[string]string) error { - return f.createResource(path, resourceDirectory, mergeMDIntoExtraHeaders(metadata, nil)) -} - -// CreateFile operation creates a new file with optional metadata or replaces an existing one. -// Note that this only initializes the file, call PutRange to add content. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn194271.aspx -func (f FileServiceClient) CreateFile(path string, maxSize uint64, metadata map[string]string) error { - extraHeaders := map[string]string{ - "x-ms-content-length": strconv.FormatUint(maxSize, 10), - "x-ms-type": "file", - } - return f.createResource(path, resourceFile, mergeMDIntoExtraHeaders(metadata, extraHeaders)) -} - -// ClearRange releases the specified range of space in storage. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn194276.aspx -func (f FileServiceClient) ClearRange(path string, fileRange FileRange) error { - return f.modifyRange(path, nil, fileRange) -} - -// PutRange writes a range of bytes to a file. Note that the length of bytes must -// match (rangeEnd - rangeStart) + 1 with a maximum size of 4MB. 
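A sketch of enumerating and releasing allocated ranges, assuming f is a *storage.File obtained elsewhere; the name of the ranges slice on the response struct (FileRanges) is assumed here, since the struct body is elided from this hunk.

```
package main

import (
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/storage"
)

// punchHoles clears every currently valid range without shrinking the file.
func punchHoles(f *storage.File) {
	ranges, err := f.ListRanges(nil) // nil lists all valid ranges
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range ranges.FileRanges { // field name assumed
		fmt.Println("clearing", r.String())
		if err := f.ClearRange(r); err != nil {
			log.Fatal(err)
		}
	}
}

func main() {} // obtaining the *File is elided; see lead-in
```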
-// -// See https://msdn.microsoft.com/en-us/library/azure/dn194276.aspx -func (f FileServiceClient) PutRange(path string, bytes io.Reader, fileRange FileRange) error { - return f.modifyRange(path, bytes, fileRange) -} - -// modifies a range of bytes in the specified file -func (f FileServiceClient) modifyRange(path string, bytes io.Reader, fileRange FileRange) error { - if err := f.checkForStorageEmulator(); err != nil { - return err - } if fileRange.End < fileRange.Start { - return errors.New("the value for rangeEnd must be greater than or equal to rangeStart") + return nil, errors.New("the value for rangeEnd must be greater than or equal to rangeStart") } - if bytes != nil && fileRange.End-fileRange.Start > 4194304 { - return errors.New("range cannot exceed 4MB in size") + if bytes != nil && isRangeTooBig(fileRange) { + return nil, errors.New("range cannot exceed 4MB in size") } - uri := f.client.getEndpoint(fileServiceName, path, url.Values{"comp": {"range"}}) + uri := f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), url.Values{"comp": {"range"}}) // default to clear write := "clear" @@ -395,347 +307,20 @@ func (f FileServiceClient) modifyRange(path string, bytes io.Reader, fileRange F "x-ms-write": write, } - headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) - resp, err := f.client.exec(http.MethodPut, uri, headers, bytes) - if err != nil { - return err + if contentMD5 != nil { + extraHeaders["Content-MD5"] = *contentMD5 } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// GetFile operation reads or downloads a file from the system, including its -// metadata and properties. -// -// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file -func (f FileServiceClient) GetFile(path string, fileRange *FileRange) (*FileStream, error) { - var extraHeaders map[string]string - if fileRange != nil { - extraHeaders = map[string]string{ - "Range": fileRange.String(), - } - } - - resp, err := f.getResourceNoClose(path, compNone, resourceFile, http.MethodGet, extraHeaders) + headers := mergeHeaders(f.fsc.client.getStandardHeaders(), extraHeaders) + resp, err := f.fsc.client.exec(http.MethodPut, uri, headers, bytes, f.fsc.auth) if err != nil { return nil, err } - - if err = checkRespCode(resp.statusCode, []int{http.StatusOK, http.StatusPartialContent}); err != nil { - resp.body.Close() - return nil, err - } - - props, err := getFileProps(resp.headers) - md := getFileMDFromHeaders(resp.headers) - return &FileStream{Body: resp.body, Properties: props, Metadata: md}, nil + defer readAndCloseBody(resp.body) + return resp.headers, checkRespCode(resp.statusCode, []int{http.StatusCreated}) } -// CreateShare operation creates a new share with optional metadata under the specified account. -// If the share with the same name already exists, the operation fails. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn167008.aspx -func (f FileServiceClient) CreateShare(name string, metadata map[string]string) error { - return f.createResource(ToPathSegment(name), resourceShare, mergeMDIntoExtraHeaders(metadata, nil)) -} - -// DirectoryExists returns true if the specified directory exists on the specified share. -func (f FileServiceClient) DirectoryExists(path string) (bool, error) { - return f.resourceExists(path, resourceDirectory) -} - -// FileExists returns true if the specified file exists. 
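The new modifyRange caps each write at 4MB, so larger uploads must be split into multiple ranged writes. A standalone sketch of just the chunking arithmetic (illustrative only, printing the Range header values the shared FileRange.String formatter would produce):

```
package main

import "fmt"

const fourMB = 4 * 1024 * 1024

// chunkRanges splits size bytes into inclusive byte ranges of at most 4MB,
// the per-write limit enforced by modifyRange.
func chunkRanges(size uint64) []string {
	var out []string
	for start := uint64(0); start < size; start += fourMB {
		end := start + fourMB - 1
		if end >= size {
			end = size - 1
		}
		out = append(out, fmt.Sprintf("bytes=%d-%d", start, end))
	}
	return out
}

func main() {
	for _, r := range chunkRanges(10 * 1024 * 1024) {
		fmt.Println(r) // three ranges: 4MB, 4MB, 2MB
	}
}
```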
-func (f FileServiceClient) FileExists(path string) (bool, error) { - return f.resourceExists(path, resourceFile) -} - -// ShareExists returns true if a share with given name exists -// on the storage account, otherwise returns false. -func (f FileServiceClient) ShareExists(name string) (bool, error) { - return f.resourceExists(ToPathSegment(name), resourceShare) -} - -// returns true if the specified directory or share exists -func (f FileServiceClient) resourceExists(path string, res resourceType) (bool, error) { - if err := f.checkForStorageEmulator(); err != nil { - return false, err - } - - uri := f.client.getEndpoint(fileServiceName, path, getURLInitValues(compNone, res)) - headers := f.client.getStandardHeaders() - - resp, err := f.client.exec(http.MethodHead, uri, headers, nil) - if resp != nil { - defer resp.body.Close() - if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound { - return resp.statusCode == http.StatusOK, nil - } - } - return false, err -} - -// GetDirectoryURL gets the canonical URL to the directory with the specified name -// in the specified share. This method does not create a publicly accessible URL if -// the file is private and this method does not check if the directory exists. -func (f FileServiceClient) GetDirectoryURL(path string) string { - return f.client.getEndpoint(fileServiceName, path, url.Values{}) -} - -// GetShareURL gets the canonical URL to the share with the specified name in the -// specified container. This method does not create a publicly accessible URL if -// the file is private and this method does not check if the share exists. -func (f FileServiceClient) GetShareURL(name string) string { - return f.client.getEndpoint(fileServiceName, ToPathSegment(name), url.Values{}) -} - -// CreateDirectoryIfNotExists creates a new directory on the specified share -// if it does not exist. Returns true if directory is newly created or false -// if the directory already exists. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn166993.aspx -func (f FileServiceClient) CreateDirectoryIfNotExists(path string) (bool, error) { - resp, err := f.createResourceNoClose(path, resourceDirectory, nil) - if resp != nil { - defer resp.body.Close() - if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict { - return resp.statusCode == http.StatusCreated, nil - } - } - return false, err -} - -// CreateShareIfNotExists creates a new share under the specified account if -// it does not exist. Returns true if container is newly created or false if -// container already exists. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dn167008.aspx -func (f FileServiceClient) CreateShareIfNotExists(name string) (bool, error) { - resp, err := f.createResourceNoClose(ToPathSegment(name), resourceShare, nil) - if resp != nil { - defer resp.body.Close() - if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict { - return resp.statusCode == http.StatusCreated, nil - } - } - return false, err -} - -// creates a resource depending on the specified resource type -func (f FileServiceClient) createResource(path string, res resourceType, extraHeaders map[string]string) error { - resp, err := f.createResourceNoClose(path, res, extraHeaders) - if err != nil { - return err - } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// creates a resource depending on the specified resource type, doesn't close the response body -func (f FileServiceClient) createResourceNoClose(path string, res resourceType, extraHeaders map[string]string) (*storageResponse, error) { - if err := f.checkForStorageEmulator(); err != nil { - return nil, err - } - - values := getURLInitValues(compNone, res) - uri := f.client.getEndpoint(fileServiceName, path, values) - headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) - - return f.client.exec(http.MethodPut, uri, headers, nil) -} - -// GetDirectoryProperties provides various information about the specified directory. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn194272.aspx -func (f FileServiceClient) GetDirectoryProperties(path string) (*DirectoryProperties, error) { - headers, err := f.getResourceHeaders(path, compNone, resourceDirectory, http.MethodHead) - if err != nil { - return nil, err - } - - return &DirectoryProperties{ - LastModified: headers.Get("Last-Modified"), - Etag: headers.Get("Etag"), - }, nil -} - -// GetFileProperties provides various information about the specified file. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn166971.aspx -func (f FileServiceClient) GetFileProperties(path string) (*FileProperties, error) { - headers, err := f.getResourceHeaders(path, compNone, resourceFile, http.MethodHead) - if err != nil { - return nil, err - } - return getFileProps(headers) -} - -// returns file properties from the specified HTTP header -func getFileProps(header http.Header) (*FileProperties, error) { - size, err := strconv.ParseUint(header.Get("Content-Length"), 10, 64) - if err != nil { - return nil, err - } - - return &FileProperties{ - CacheControl: header.Get("Cache-Control"), - ContentLength: size, - ContentType: header.Get("Content-Type"), - CopyCompletionTime: header.Get("x-ms-copy-completion-time"), - CopyID: header.Get("x-ms-copy-id"), - CopyProgress: header.Get("x-ms-copy-progress"), - CopySource: header.Get("x-ms-copy-source"), - CopyStatus: header.Get("x-ms-copy-status"), - CopyStatusDesc: header.Get("x-ms-copy-status-description"), - Disposition: header.Get("Content-Disposition"), - Encoding: header.Get("Content-Encoding"), - Etag: header.Get("ETag"), - Language: header.Get("Content-Language"), - LastModified: header.Get("Last-Modified"), - MD5: header.Get("Content-MD5"), - }, nil -} - -// GetShareProperties provides various information about the specified -// file. 
See https://msdn.microsoft.com/en-us/library/azure/dn689099.aspx -func (f FileServiceClient) GetShareProperties(name string) (*ShareProperties, error) { - headers, err := f.getResourceHeaders(ToPathSegment(name), compNone, resourceShare, http.MethodHead) - if err != nil { - return nil, err - } - return &ShareProperties{ - LastModified: headers.Get("Last-Modified"), - Etag: headers.Get("Etag"), - Quota: headers.Get("x-ms-share-quota"), - }, nil -} - -// returns HTTP header data for the specified directory or share -func (f FileServiceClient) getResourceHeaders(path string, comp compType, res resourceType, verb string) (http.Header, error) { - resp, err := f.getResourceNoClose(path, comp, res, verb, nil) - if err != nil { - return nil, err - } - defer resp.body.Close() - - if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return nil, err - } - - return resp.headers, nil -} - -// gets the specified resource, doesn't close the response body -func (f FileServiceClient) getResourceNoClose(path string, comp compType, res resourceType, verb string, extraHeaders map[string]string) (*storageResponse, error) { - if err := f.checkForStorageEmulator(); err != nil { - return nil, err - } - - params := getURLInitValues(comp, res) - uri := f.client.getEndpoint(fileServiceName, path, params) - headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) - - return f.client.exec(verb, uri, headers, nil) -} - -// SetFileProperties operation sets system properties on the specified file. -// -// Some keys may be converted to Camel-Case before sending. All keys -// are returned in lower case by SetFileProperties. HTTP header names -// are case-insensitive so case munging should not matter to other -// applications either. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn166975.aspx -func (f FileServiceClient) SetFileProperties(path string, props FileProperties) error { - return f.setResourceHeaders(path, compProperties, resourceFile, headersFromStruct(props)) -} - -// SetShareProperties replaces the ShareHeaders for the specified file. -// -// Some keys may be converted to Camel-Case before sending. All keys -// are returned in lower case by SetShareProperties. HTTP header names -// are case-insensitive so case munging should not matter to other -// applications either. -// -// See https://msdn.microsoft.com/en-us/library/azure/mt427368.aspx -func (f FileServiceClient) SetShareProperties(name string, shareHeaders ShareHeaders) error { - return f.setResourceHeaders(ToPathSegment(name), compProperties, resourceShare, headersFromStruct(shareHeaders)) -} - -// DeleteDirectory operation removes the specified empty directory. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn166969.aspx -func (f FileServiceClient) DeleteDirectory(path string) error { - return f.deleteResource(path, resourceDirectory) -} - -// DeleteFile operation immediately removes the file from the storage account. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn689085.aspx -func (f FileServiceClient) DeleteFile(path string) error { - return f.deleteResource(path, resourceFile) -} - -// DeleteShare operation marks the specified share for deletion. The share -// and any files contained within it are later deleted during garbage -// collection. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dn689090.aspx -func (f FileServiceClient) DeleteShare(name string) error { - return f.deleteResource(ToPathSegment(name), resourceShare) -} - -// DeleteShareIfExists operation marks the specified share for deletion if it -// exists. The share and any files contained within it are later deleted during -// garbage collection. Returns true if share existed and deleted with this call, -// false otherwise. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn689090.aspx -func (f FileServiceClient) DeleteShareIfExists(name string) (bool, error) { - resp, err := f.deleteResourceNoClose(ToPathSegment(name), resourceShare) - if resp != nil { - defer resp.body.Close() - if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { - return resp.statusCode == http.StatusAccepted, nil - } - } - return false, err -} - -// deletes the resource and returns the response -func (f FileServiceClient) deleteResource(path string, res resourceType) error { - resp, err := f.deleteResourceNoClose(path, res) - if err != nil { - return err - } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) -} - -// deletes the resource and returns the response, doesn't close the response body -func (f FileServiceClient) deleteResourceNoClose(path string, res resourceType) (*storageResponse, error) { - if err := f.checkForStorageEmulator(); err != nil { - return nil, err - } - - values := getURLInitValues(compNone, res) - uri := f.client.getEndpoint(fileServiceName, path, values) - return f.client.exec(http.MethodDelete, uri, f.client.getStandardHeaders(), nil) -} - -// SetDirectoryMetadata replaces the metadata for the specified directory. -// -// Some keys may be converted to Camel-Case before sending. All keys -// are returned in lower case by GetDirectoryMetadata. HTTP header names -// are case-insensitive so case munging should not matter to other -// applications either. -// -// See https://msdn.microsoft.com/en-us/library/azure/mt427370.aspx -func (f FileServiceClient) SetDirectoryMetadata(path string, metadata map[string]string) error { - return f.setResourceHeaders(path, compMetadata, resourceDirectory, mergeMDIntoExtraHeaders(metadata, nil)) -} - -// SetFileMetadata replaces the metadata for the specified file. +// SetMetadata replaces the metadata for this file. // // Some keys may be converted to Camel-Case before sending. All keys // are returned in lower case by GetFileMetadata. HTTP header names @@ -743,136 +328,85 @@ func (f FileServiceClient) SetDirectoryMetadata(path string, metadata map[string // applications either. // // See https://msdn.microsoft.com/en-us/library/azure/dn689097.aspx -func (f FileServiceClient) SetFileMetadata(path string, metadata map[string]string) error { - return f.setResourceHeaders(path, compMetadata, resourceFile, mergeMDIntoExtraHeaders(metadata, nil)) +func (f *File) SetMetadata() error { + headers, err := f.fsc.setResourceHeaders(f.buildPath(), compMetadata, resourceFile, mergeMDIntoExtraHeaders(f.Metadata, nil)) + if err != nil { + return err + } + + f.updateEtagAndLastModified(headers) + return nil } -// SetShareMetadata replaces the metadata for the specified Share. +// SetProperties sets system properties on this file. // // Some keys may be converted to Camel-Case before sending. All keys -// are returned in lower case by GetShareMetadata. HTTP header names +// are returned in lower case by SetFileProperties. 
HTTP header names // are case-insensitive so case munging should not matter to other // applications either. // -// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx -func (f FileServiceClient) SetShareMetadata(name string, metadata map[string]string) error { - return f.setResourceHeaders(ToPathSegment(name), compMetadata, resourceShare, mergeMDIntoExtraHeaders(metadata, nil)) -} - -// merges metadata into extraHeaders and returns extraHeaders -func mergeMDIntoExtraHeaders(metadata, extraHeaders map[string]string) map[string]string { - if metadata == nil && extraHeaders == nil { - return nil - } - if extraHeaders == nil { - extraHeaders = make(map[string]string) - } - for k, v := range metadata { - extraHeaders[userDefinedMetadataHeaderPrefix+k] = v - } - return extraHeaders -} - -// merges extraHeaders into headers and returns headers -func mergeHeaders(headers, extraHeaders map[string]string) map[string]string { - for k, v := range extraHeaders { - headers[k] = v - } - return headers -} - -// sets extra header data for the specified resource -func (f FileServiceClient) setResourceHeaders(path string, comp compType, res resourceType, extraHeaders map[string]string) error { - if err := f.checkForStorageEmulator(); err != nil { - return err - } - - params := getURLInitValues(comp, res) - uri := f.client.getEndpoint(fileServiceName, path, params) - headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) - - resp, err := f.client.exec(http.MethodPut, uri, headers, nil) +// See https://msdn.microsoft.com/en-us/library/azure/dn166975.aspx +func (f *File) SetProperties() error { + headers, err := f.fsc.setResourceHeaders(f.buildPath(), compProperties, resourceFile, headersFromStruct(f.Properties)) if err != nil { return err } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusOK}) -} - -// GetDirectoryMetadata returns all user-defined metadata for the specified directory. -// -// All metadata keys will be returned in lower case. (HTTP header -// names are case-insensitive.) -// -// See https://msdn.microsoft.com/en-us/library/azure/mt427371.aspx -func (f FileServiceClient) GetDirectoryMetadata(path string) (map[string]string, error) { - return f.getMetadata(path, resourceDirectory) -} - -// GetFileMetadata returns all user-defined metadata for the specified file. -// -// All metadata keys will be returned in lower case. (HTTP header -// names are case-insensitive.) -// -// See https://msdn.microsoft.com/en-us/library/azure/dn689098.aspx -func (f FileServiceClient) GetFileMetadata(path string) (map[string]string, error) { - return f.getMetadata(path, resourceFile) -} - -// GetShareMetadata returns all user-defined metadata for the specified share. -// -// All metadata keys will be returned in lower case. (HTTP header -// names are case-insensitive.) 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx -func (f FileServiceClient) GetShareMetadata(name string) (map[string]string, error) { - return f.getMetadata(ToPathSegment(name), resourceShare) -} - -// gets metadata for the specified resource -func (f FileServiceClient) getMetadata(path string, res resourceType) (map[string]string, error) { - if err := f.checkForStorageEmulator(); err != nil { - return nil, err - } - - headers, err := f.getResourceHeaders(path, compMetadata, res, http.MethodGet) - if err != nil { - return nil, err - } - - return getFileMDFromHeaders(headers), nil -} - -// returns a map of custom metadata values from the specified HTTP header -func getFileMDFromHeaders(header http.Header) map[string]string { - metadata := make(map[string]string) - for k, v := range header { - // Can't trust CanonicalHeaderKey() to munge case - // reliably. "_" is allowed in identifiers: - // https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx - // https://msdn.microsoft.com/library/aa664670(VS.71).aspx - // http://tools.ietf.org/html/rfc7230#section-3.2 - // ...but "_" is considered invalid by - // CanonicalMIMEHeaderKey in - // https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542 - // so k can be "X-Ms-Meta-Foo" or "x-ms-meta-foo_bar". - k = strings.ToLower(k) - if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) { - continue - } - // metadata["foo"] = content of the last X-Ms-Meta-Foo header - k = k[len(userDefinedMetadataHeaderPrefix):] - metadata[k] = v[len(v)-1] - } - return metadata -} - -//checkForStorageEmulator determines if the client is setup for use with -//Azure Storage Emulator, and returns a relevant error -func (f FileServiceClient) checkForStorageEmulator() error { - if f.client.accountName == StorageEmulatorAccountName { - return fmt.Errorf("Error: File service is not currently supported by Azure Storage Emulator") - } + f.updateEtagAndLastModified(headers) + return nil +} + +// updates Etag and last modified date +func (f *File) updateEtagAndLastModified(headers http.Header) { + f.Properties.Etag = headers.Get("Etag") + f.Properties.LastModified = headers.Get("Last-Modified") +} + +// updates Etag, last modified date and x-ms-copy-id +func (f *File) updateEtagLastModifiedAndCopyHeaders(headers http.Header) { + f.Properties.Etag = headers.Get("Etag") + f.Properties.LastModified = headers.Get("Last-Modified") + f.FileCopyProperties.ID = headers.Get("X-Ms-Copy-Id") + f.FileCopyProperties.Status = headers.Get("X-Ms-Copy-Status") +} + +// updates file properties from the specified HTTP header +func (f *File) updateProperties(header http.Header) { + size, err := strconv.ParseUint(header.Get("Content-Length"), 10, 64) + if err == nil { + f.Properties.Length = size + } + + f.updateEtagAndLastModified(header) + f.Properties.CacheControl = header.Get("Cache-Control") + f.Properties.Disposition = header.Get("Content-Disposition") + f.Properties.Encoding = header.Get("Content-Encoding") + f.Properties.Language = header.Get("Content-Language") + f.Properties.MD5 = header.Get("Content-MD5") + f.Properties.Type = header.Get("Content-Type") +} + +// URL gets the canonical URL to this file. +// This method does not create a publicly accessible URL if the file +// is private and this method does not check if the file exists. 
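
For orientation while reviewing the new object-style File API above, a minimal usage sketch, assuming a `*storage.File` already obtained from a directory reference (the acquisition path is not part of this hunk; the `Properties.Type` field matches the `updateProperties` code above):

```
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/storage"
)

// setContentType stages a property change on the local struct and pushes
// it with SetProperties; per updateEtagAndLastModified above, the call
// also refreshes the cached Etag and Last-Modified from the response.
func setContentType(f *storage.File) error {
	f.Properties.Type = "application/octet-stream"
	return f.SetProperties()
}

func main() {
	fmt.Println("illustration only; setContentType needs a live *storage.File")
}
```
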
+func (f *File) URL() string { + return f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), url.Values{}) +} + +// WriteRange writes a range of bytes to this file with an optional MD5 hash of the content. +// Note that the length of bytes must match (rangeEnd - rangeStart) + 1 with a maximum size of 4MB. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn194276.aspx +func (f *File) WriteRange(bytes io.Reader, fileRange FileRange, contentMD5 *string) error { + if bytes == nil { + return errors.New("bytes cannot be nil") + } + + headers, err := f.modifyRange(bytes, fileRange, contentMD5) + if err != nil { + return err + } + + f.updateEtagAndLastModified(headers) return nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go new file mode 100644 index 00000000..d68bd7f6 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go @@ -0,0 +1,375 @@ +package storage + +import ( + "encoding/xml" + "fmt" + "net/http" + "net/url" + "strings" +) + +// FileServiceClient contains operations for Microsoft Azure File Service. +type FileServiceClient struct { + client Client + auth authentication +} + +// ListSharesParameters defines the set of customizable parameters to make a +// List Shares call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn167009.aspx +type ListSharesParameters struct { + Prefix string + Marker string + Include string + MaxResults uint + Timeout uint +} + +// ShareListResponse contains the response fields from +// ListShares call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn167009.aspx +type ShareListResponse struct { + XMLName xml.Name `xml:"EnumerationResults"` + Xmlns string `xml:"xmlns,attr"` + Prefix string `xml:"Prefix"` + Marker string `xml:"Marker"` + NextMarker string `xml:"NextMarker"` + MaxResults int64 `xml:"MaxResults"` + Shares []Share `xml:"Shares>Share"` +} + +type compType string + +const ( + compNone compType = "" + compList compType = "list" + compMetadata compType = "metadata" + compProperties compType = "properties" + compRangeList compType = "rangelist" +) + +func (ct compType) String() string { + return string(ct) +} + +type resourceType string + +const ( + resourceDirectory resourceType = "directory" + resourceFile resourceType = "" + resourceShare resourceType = "share" +) + +func (rt resourceType) String() string { + return string(rt) +} + +func (p ListSharesParameters) getParameters() url.Values { + out := url.Values{} + + if p.Prefix != "" { + out.Set("prefix", p.Prefix) + } + if p.Marker != "" { + out.Set("marker", p.Marker) + } + if p.Include != "" { + out.Set("include", p.Include) + } + if p.MaxResults != 0 { + out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults)) + } + if p.Timeout != 0 { + out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) + } + + return out +} + +func (p ListDirsAndFilesParameters) getParameters() url.Values { + out := url.Values{} + + if p.Marker != "" { + out.Set("marker", p.Marker) + } + if p.MaxResults != 0 { + out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults)) + } + if p.Timeout != 0 { + out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) + } + + return out +} + +// returns url.Values for the specified types +func getURLInitValues(comp compType, res resourceType) url.Values { + values := url.Values{} + if comp != compNone { + values.Set("comp", comp.String()) + } + if res != resourceFile { + values.Set("restype", res.String()) + } + return values +} 
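
A self-contained mirror of `getURLInitValues` (types and constants copied from the new file above; not part of the patch) showing the query strings it yields for the common cases:

```
package main

import (
	"fmt"
	"net/url"
)

type compType string
type resourceType string

const (
	compNone      compType     = ""
	compMetadata  compType     = "metadata"
	resourceFile  resourceType = ""
	resourceShare resourceType = "share"
)

// getURLInitValues mirrors the helper above: only non-default values
// become query parameters.
func getURLInitValues(comp compType, res resourceType) url.Values {
	values := url.Values{}
	if comp != compNone {
		values.Set("comp", string(comp))
	}
	if res != resourceFile {
		values.Set("restype", string(res))
	}
	return values
}

func main() {
	fmt.Println(getURLInitValues(compMetadata, resourceShare).Encode()) // comp=metadata&restype=share
	fmt.Println(getURLInitValues(compNone, resourceFile).Encode())      // (empty)
}
```
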
+ +// GetShareReference returns a Share object for the specified share name. +func (f FileServiceClient) GetShareReference(name string) Share { + return Share{ + fsc: &f, + Name: name, + Properties: ShareProperties{ + Quota: -1, + }, + } +} + +// ListShares returns the list of shares in a storage account along with +// pagination token and other response details. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx +func (f FileServiceClient) ListShares(params ListSharesParameters) (*ShareListResponse, error) { + q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}}) + + var out ShareListResponse + resp, err := f.listContent("", q, nil) + if err != nil { + return nil, err + } + defer resp.body.Close() + err = xmlUnmarshal(resp.body, &out) + + // assign our client to the newly created Share objects + for i := range out.Shares { + out.Shares[i].fsc = &f + } + return &out, err +} + +// GetServiceProperties gets the properties of your storage account's file service. +// File service does not support logging +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file-service-properties +func (f *FileServiceClient) GetServiceProperties() (*ServiceProperties, error) { + return f.client.getServiceProperties(fileServiceName, f.auth) +} + +// SetServiceProperties sets the properties of your storage account's file service. +// File service does not support logging +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-file-service-properties +func (f *FileServiceClient) SetServiceProperties(props ServiceProperties) error { + return f.client.setServiceProperties(props, fileServiceName, f.auth) +} + +// retrieves directory or share content +func (f FileServiceClient) listContent(path string, params url.Values, extraHeaders map[string]string) (*storageResponse, error) { + if err := f.checkForStorageEmulator(); err != nil { + return nil, err + } + + uri := f.client.getEndpoint(fileServiceName, path, params) + extraHeaders = f.client.protectUserAgent(extraHeaders) + headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) + + resp, err := f.client.exec(http.MethodGet, uri, headers, nil, f.auth) + if err != nil { + return nil, err + } + + if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + readAndCloseBody(resp.body) + return nil, err + } + + return resp, nil +} + +// returns true if the specified resource exists +func (f FileServiceClient) resourceExists(path string, res resourceType) (bool, http.Header, error) { + if err := f.checkForStorageEmulator(); err != nil { + return false, nil, err + } + + uri := f.client.getEndpoint(fileServiceName, path, getURLInitValues(compNone, res)) + headers := f.client.getStandardHeaders() + + resp, err := f.client.exec(http.MethodHead, uri, headers, nil, f.auth) + if resp != nil { + defer readAndCloseBody(resp.body) + if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusOK, resp.headers, nil + } + } + return false, nil, err +} + +// creates a resource depending on the specified resource type +func (f FileServiceClient) createResource(path string, res resourceType, urlParams url.Values, extraHeaders map[string]string, expectedResponseCodes []int) (http.Header, error) { + resp, err := f.createResourceNoClose(path, res, urlParams, extraHeaders) + if err != nil { + return nil, err + } + defer readAndCloseBody(resp.body) + return resp.headers, checkRespCode(resp.statusCode, 
expectedResponseCodes) +} + +// creates a resource depending on the specified resource type, doesn't close the response body +func (f FileServiceClient) createResourceNoClose(path string, res resourceType, urlParams url.Values, extraHeaders map[string]string) (*storageResponse, error) { + if err := f.checkForStorageEmulator(); err != nil { + return nil, err + } + + values := getURLInitValues(compNone, res) + combinedParams := mergeParams(values, urlParams) + uri := f.client.getEndpoint(fileServiceName, path, combinedParams) + extraHeaders = f.client.protectUserAgent(extraHeaders) + headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) + + return f.client.exec(http.MethodPut, uri, headers, nil, f.auth) +} + +// returns HTTP header data for the specified directory or share +func (f FileServiceClient) getResourceHeaders(path string, comp compType, res resourceType, verb string) (http.Header, error) { + resp, err := f.getResourceNoClose(path, comp, res, verb, nil) + if err != nil { + return nil, err + } + defer readAndCloseBody(resp.body) + + if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return nil, err + } + + return resp.headers, nil +} + +// gets the specified resource, doesn't close the response body +func (f FileServiceClient) getResourceNoClose(path string, comp compType, res resourceType, verb string, extraHeaders map[string]string) (*storageResponse, error) { + if err := f.checkForStorageEmulator(); err != nil { + return nil, err + } + + params := getURLInitValues(comp, res) + uri := f.client.getEndpoint(fileServiceName, path, params) + extraHeaders = f.client.protectUserAgent(extraHeaders) + headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) + + return f.client.exec(verb, uri, headers, nil, f.auth) +} + +// deletes the resource and returns the response +func (f FileServiceClient) deleteResource(path string, res resourceType) error { + resp, err := f.deleteResourceNoClose(path, res) + if err != nil { + return err + } + defer readAndCloseBody(resp.body) + return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) +} + +// deletes the resource and returns the response, doesn't close the response body +func (f FileServiceClient) deleteResourceNoClose(path string, res resourceType) (*storageResponse, error) { + if err := f.checkForStorageEmulator(); err != nil { + return nil, err + } + + values := getURLInitValues(compNone, res) + uri := f.client.getEndpoint(fileServiceName, path, values) + return f.client.exec(http.MethodDelete, uri, f.client.getStandardHeaders(), nil, f.auth) +} + +// merges metadata into extraHeaders and returns extraHeaders +func mergeMDIntoExtraHeaders(metadata, extraHeaders map[string]string) map[string]string { + if metadata == nil && extraHeaders == nil { + return nil + } + if extraHeaders == nil { + extraHeaders = make(map[string]string) + } + for k, v := range metadata { + extraHeaders[userDefinedMetadataHeaderPrefix+k] = v + } + return extraHeaders +} + +// merges extraHeaders into headers and returns headers +func mergeHeaders(headers, extraHeaders map[string]string) map[string]string { + for k, v := range extraHeaders { + headers[k] = v + } + return headers +} + +// sets extra header data for the specified resource +func (f FileServiceClient) setResourceHeaders(path string, comp compType, res resourceType, extraHeaders map[string]string) (http.Header, error) { + if err := f.checkForStorageEmulator(); err != nil { + return nil, err + } + + params := getURLInitValues(comp, res) + uri := 
f.client.getEndpoint(fileServiceName, path, params) + extraHeaders = f.client.protectUserAgent(extraHeaders) + headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) + + resp, err := f.client.exec(http.MethodPut, uri, headers, nil, f.auth) + if err != nil { + return nil, err + } + defer readAndCloseBody(resp.body) + + return resp.headers, checkRespCode(resp.statusCode, []int{http.StatusOK}) +} + +// gets metadata for the specified resource +func (f FileServiceClient) getMetadata(path string, res resourceType) (map[string]string, error) { + if err := f.checkForStorageEmulator(); err != nil { + return nil, err + } + + headers, err := f.getResourceHeaders(path, compMetadata, res, http.MethodGet) + if err != nil { + return nil, err + } + + return getMetadataFromHeaders(headers), nil +} + +// returns a map of custom metadata values from the specified HTTP header +func getMetadataFromHeaders(header http.Header) map[string]string { + metadata := make(map[string]string) + for k, v := range header { + // Can't trust CanonicalHeaderKey() to munge case + // reliably. "_" is allowed in identifiers: + // https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx + // https://msdn.microsoft.com/library/aa664670(VS.71).aspx + // http://tools.ietf.org/html/rfc7230#section-3.2 + // ...but "_" is considered invalid by + // CanonicalMIMEHeaderKey in + // https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542 + // so k can be "X-Ms-Meta-Foo" or "x-ms-meta-foo_bar". + k = strings.ToLower(k) + if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) { + continue + } + // metadata["foo"] = content of the last X-Ms-Meta-Foo header + k = k[len(userDefinedMetadataHeaderPrefix):] + metadata[k] = v[len(v)-1] + } + + if len(metadata) == 0 { + return nil + } + + return metadata +} + +//checkForStorageEmulator determines if the client is setup for use with +//Azure Storage Emulator, and returns a relevant error +func (f FileServiceClient) checkForStorageEmulator() error { + if f.client.accountName == StorageEmulatorAccountName { + return fmt.Errorf("Error: File service is not currently supported by Azure Storage Emulator") + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go index 0cd35784..4031410a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go @@ -15,12 +15,6 @@ const ( userDefinedMetadataHeaderPrefix = "X-Ms-Meta-" ) -// QueueServiceClient contains operations for Microsoft Azure Queue Storage -// Service. 
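
The metadata plumbing above lower-cases header keys by hand because Go's `CanonicalMIMEHeaderKey` rejects the underscores that are legal in storage metadata names. A stand-alone sketch of that behavior, adapted from `getMetadataFromHeaders` in the new file:

```
package main

import (
	"fmt"
	"net/http"
	"strings"
)

const userDefinedMetadataHeaderPrefix = "X-Ms-Meta-"

// getMetadataFromHeaders is adapted from the helper above: keys are
// lower-cased manually because CanonicalMIMEHeaderKey would reject "_".
func getMetadataFromHeaders(header http.Header) map[string]string {
	metadata := make(map[string]string)
	prefix := strings.ToLower(userDefinedMetadataHeaderPrefix)
	for k, v := range header {
		k = strings.ToLower(k)
		if len(v) == 0 || !strings.HasPrefix(k, prefix) {
			continue
		}
		// keep the content of the last header with this name
		metadata[k[len(prefix):]] = v[len(v)-1]
	}
	if len(metadata) == 0 {
		return nil
	}
	return metadata
}

func main() {
	h := http.Header{}
	h.Set("X-Ms-Meta-Foo", "bar")
	h["x-ms-meta-foo_bar"] = []string{"baz"} // "_" only survives direct map access
	fmt.Println(getMetadataFromHeaders(h))   // foo:bar and foo_bar:baz
}
```
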
-type QueueServiceClient struct { - client Client -} - func pathForQueue(queue string) string { return fmt.Sprintf("/%s", queue) } func pathForQueueMessages(queue string) string { return fmt.Sprintf("/%s/messages", queue) } func pathForMessage(queue, name string) string { return fmt.Sprintf("/%s/messages/%s", queue, name) } @@ -151,16 +145,17 @@ type QueueMetadataResponse struct { // See https://msdn.microsoft.com/en-us/library/azure/dd179348.aspx func (c QueueServiceClient) SetMetadata(name string, metadata map[string]string) error { uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": []string{"metadata"}}) + metadata = c.client.protectUserAgent(metadata) headers := c.client.getStandardHeaders() for k, v := range metadata { headers[userDefinedMetadataHeaderPrefix+k] = v } - resp, err := c.client.exec("PUT", uri, headers, nil) + resp, err := c.client.exec(http.MethodPut, uri, headers, nil, c.auth) if err != nil { return err } - defer resp.body.Close() + defer readAndCloseBody(resp.body) return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) } @@ -179,11 +174,11 @@ func (c QueueServiceClient) GetMetadata(name string) (QueueMetadataResponse, err qm.UserDefinedMetadata = make(map[string]string) uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": []string{"metadata"}}) headers := c.client.getStandardHeaders() - resp, err := c.client.exec("GET", uri, headers, nil) + resp, err := c.client.exec(http.MethodGet, uri, headers, nil, c.auth) if err != nil { return qm, err } - defer resp.body.Close() + defer readAndCloseBody(resp.body) for k, v := range resp.headers { if len(v) != 1 { @@ -212,11 +207,11 @@ func (c QueueServiceClient) GetMetadata(name string) (QueueMetadataResponse, err func (c QueueServiceClient) CreateQueue(name string) error { uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{}) headers := c.client.getStandardHeaders() - resp, err := c.client.exec("PUT", uri, headers, nil) + resp, err := c.client.exec(http.MethodPut, uri, headers, nil, c.auth) if err != nil { return err } - defer resp.body.Close() + defer readAndCloseBody(resp.body) return checkRespCode(resp.statusCode, []int{http.StatusCreated}) } @@ -225,18 +220,18 @@ func (c QueueServiceClient) CreateQueue(name string) error { // See https://msdn.microsoft.com/en-us/library/azure/dd179436.aspx func (c QueueServiceClient) DeleteQueue(name string) error { uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{}) - resp, err := c.client.exec("DELETE", uri, c.client.getStandardHeaders(), nil) + resp, err := c.client.exec(http.MethodDelete, uri, c.client.getStandardHeaders(), nil, c.auth) if err != nil { return err } - defer resp.body.Close() + defer readAndCloseBody(resp.body) return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) } // QueueExists returns true if a queue with given name exists. 
func (c QueueServiceClient) QueueExists(name string) (bool, error) { uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": {"metadata"}}) - resp, err := c.client.exec("GET", uri, c.client.getStandardHeaders(), nil) + resp, err := c.client.exec(http.MethodGet, uri, c.client.getStandardHeaders(), nil, c.auth) if resp != nil && (resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound) { return resp.statusCode == http.StatusOK, nil } @@ -256,11 +251,11 @@ func (c QueueServiceClient) PutMessage(queue string, message string, params PutM } headers := c.client.getStandardHeaders() headers["Content-Length"] = strconv.Itoa(nn) - resp, err := c.client.exec("POST", uri, headers, body) + resp, err := c.client.exec(http.MethodPost, uri, headers, body, c.auth) if err != nil { return err } - defer resp.body.Close() + defer readAndCloseBody(resp.body) return checkRespCode(resp.statusCode, []int{http.StatusCreated}) } @@ -269,11 +264,11 @@ func (c QueueServiceClient) PutMessage(queue string, message string, params PutM // See https://msdn.microsoft.com/en-us/library/azure/dd179454.aspx func (c QueueServiceClient) ClearMessages(queue string) error { uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), url.Values{}) - resp, err := c.client.exec("DELETE", uri, c.client.getStandardHeaders(), nil) + resp, err := c.client.exec(http.MethodDelete, uri, c.client.getStandardHeaders(), nil, c.auth) if err != nil { return err } - defer resp.body.Close() + defer readAndCloseBody(resp.body) return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) } @@ -284,7 +279,7 @@ func (c QueueServiceClient) ClearMessages(queue string) error { func (c QueueServiceClient) GetMessages(queue string, params GetMessagesParameters) (GetMessagesResponse, error) { var r GetMessagesResponse uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters()) - resp, err := c.client.exec("GET", uri, c.client.getStandardHeaders(), nil) + resp, err := c.client.exec(http.MethodGet, uri, c.client.getStandardHeaders(), nil, c.auth) if err != nil { return r, err } @@ -300,7 +295,7 @@ func (c QueueServiceClient) GetMessages(queue string, params GetMessagesParamete func (c QueueServiceClient) PeekMessages(queue string, params PeekMessagesParameters) (PeekMessagesResponse, error) { var r PeekMessagesResponse uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters()) - resp, err := c.client.exec("GET", uri, c.client.getStandardHeaders(), nil) + resp, err := c.client.exec(http.MethodGet, uri, c.client.getStandardHeaders(), nil, c.auth) if err != nil { return r, err } @@ -315,11 +310,11 @@ func (c QueueServiceClient) PeekMessages(queue string, params PeekMessagesParame func (c QueueServiceClient) DeleteMessage(queue, messageID, popReceipt string) error { uri := c.client.getEndpoint(queueServiceName, pathForMessage(queue, messageID), url.Values{ "popreceipt": {popReceipt}}) - resp, err := c.client.exec("DELETE", uri, c.client.getStandardHeaders(), nil) + resp, err := c.client.exec(http.MethodDelete, uri, c.client.getStandardHeaders(), nil, c.auth) if err != nil { return err } - defer resp.body.Close() + defer readAndCloseBody(resp.body) return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) } @@ -335,10 +330,10 @@ func (c QueueServiceClient) UpdateMessage(queue string, messageID string, messag } headers := c.client.getStandardHeaders() headers["Content-Length"] = fmt.Sprintf("%d", nn) - resp, 
err := c.client.exec("PUT", uri, headers, body) + resp, err := c.client.exec(http.MethodPut, uri, headers, body, c.auth) if err != nil { return err } - defer resp.body.Close() + defer readAndCloseBody(resp.body) return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go new file mode 100644 index 00000000..c2614133 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go @@ -0,0 +1,20 @@ +package storage + +// QueueServiceClient contains operations for Microsoft Azure Queue Storage +// Service. +type QueueServiceClient struct { + client Client + auth authentication +} + +// GetServiceProperties gets the properties of your storage account's queue service. +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-queue-service-properties +func (c *QueueServiceClient) GetServiceProperties() (*ServiceProperties, error) { + return c.client.getServiceProperties(queueServiceName, c.auth) +} + +// SetServiceProperties sets the properties of your storage account's queue service. +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-queue-service-properties +func (c *QueueServiceClient) SetServiceProperties(props ServiceProperties) error { + return c.client.setServiceProperties(props, queueServiceName, c.auth) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/share.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/share.go new file mode 100644 index 00000000..e190097e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/share.go @@ -0,0 +1,186 @@ +package storage + +import ( + "fmt" + "net/http" + "net/url" + "strconv" +) + +// Share represents an Azure file share. +type Share struct { + fsc *FileServiceClient + Name string `xml:"Name"` + Properties ShareProperties `xml:"Properties"` + Metadata map[string]string +} + +// ShareProperties contains various properties of a share. +type ShareProperties struct { + LastModified string `xml:"Last-Modified"` + Etag string `xml:"Etag"` + Quota int `xml:"Quota"` +} + +// builds the complete path for this share object. +func (s *Share) buildPath() string { + return fmt.Sprintf("/%s", s.Name) +} + +// Create this share under the associated account. +// If a share with the same name already exists, the operation fails. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn167008.aspx +func (s *Share) Create() error { + headers, err := s.fsc.createResource(s.buildPath(), resourceShare, nil, mergeMDIntoExtraHeaders(s.Metadata, nil), []int{http.StatusCreated}) + if err != nil { + return err + } + + s.updateEtagAndLastModified(headers) + return nil +} + +// CreateIfNotExists creates this share under the associated account if +// it does not exist. Returns true if the share is newly created or false if +// the share already exists. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dn167008.aspx +func (s *Share) CreateIfNotExists() (bool, error) { + resp, err := s.fsc.createResourceNoClose(s.buildPath(), resourceShare, nil, nil) + if resp != nil { + defer readAndCloseBody(resp.body) + if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict { + if resp.statusCode == http.StatusCreated { + s.updateEtagAndLastModified(resp.headers) + return true, nil + } + return false, s.FetchAttributes() + } + } + + return false, err +} + +// Delete marks this share for deletion. The share along with any files +// and directories contained within it are later deleted during garbage +// collection. If the share does not exist the operation fails +// +// See https://msdn.microsoft.com/en-us/library/azure/dn689090.aspx +func (s *Share) Delete() error { + return s.fsc.deleteResource(s.buildPath(), resourceShare) +} + +// DeleteIfExists operation marks this share for deletion if it exists. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn689090.aspx +func (s *Share) DeleteIfExists() (bool, error) { + resp, err := s.fsc.deleteResourceNoClose(s.buildPath(), resourceShare) + if resp != nil { + defer readAndCloseBody(resp.body) + if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusAccepted, nil + } + } + return false, err +} + +// Exists returns true if this share already exists +// on the storage account, otherwise returns false. +func (s *Share) Exists() (bool, error) { + exists, headers, err := s.fsc.resourceExists(s.buildPath(), resourceShare) + if exists { + s.updateEtagAndLastModified(headers) + s.updateQuota(headers) + } + return exists, err +} + +// FetchAttributes retrieves metadata and properties for this share. +func (s *Share) FetchAttributes() error { + headers, err := s.fsc.getResourceHeaders(s.buildPath(), compNone, resourceShare, http.MethodHead) + if err != nil { + return err + } + + s.updateEtagAndLastModified(headers) + s.updateQuota(headers) + s.Metadata = getMetadataFromHeaders(headers) + + return nil +} + +// GetRootDirectoryReference returns a Directory object at the root of this share. +func (s *Share) GetRootDirectoryReference() *Directory { + return &Directory{ + fsc: s.fsc, + share: s, + } +} + +// ServiceClient returns the FileServiceClient associated with this share. +func (s *Share) ServiceClient() *FileServiceClient { + return s.fsc +} + +// SetMetadata replaces the metadata for this share. +// +// Some keys may be converted to Camel-Case before sending. All keys +// are returned in lower case by GetShareMetadata. HTTP header names +// are case-insensitive so case munging should not matter to other +// applications either. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx +func (s *Share) SetMetadata() error { + headers, err := s.fsc.setResourceHeaders(s.buildPath(), compMetadata, resourceShare, mergeMDIntoExtraHeaders(s.Metadata, nil)) + if err != nil { + return err + } + + s.updateEtagAndLastModified(headers) + return nil +} + +// SetProperties sets system properties for this share. +// +// Some keys may be converted to Camel-Case before sending. All keys +// are returned in lower case by SetShareProperties. HTTP header names +// are case-insensitive so case munging should not matter to other +// applications either. 
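
A hedged usage sketch for the Share object defined in this file. `NewBasicClient` and `GetFileService` are pre-existing constructors in this package; the account name and base64 key below are placeholders:

```
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	cli, err := storage.NewBasicClient("myaccount", "bXlrZXk=")
	if err != nil {
		log.Fatal(err)
	}
	fsc := cli.GetFileService()

	share := fsc.GetShareReference("myshare")
	created, err := share.CreateIfNotExists()
	if err != nil {
		log.Fatal(err)
	}
	if !created {
		log.Println("share already existed; attributes were fetched instead")
	}

	// Metadata is staged locally on the struct and pushed in one call.
	share.Metadata = map[string]string{"team": "registry"}
	if err := share.SetMetadata(); err != nil {
		log.Fatal(err)
	}
}
```
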
+// +// See https://msdn.microsoft.com/en-us/library/azure/mt427368.aspx +func (s *Share) SetProperties() error { + if s.Properties.Quota < 1 || s.Properties.Quota > 5120 { + return fmt.Errorf("invalid value %v for quota, valid values are [1, 5120]", s.Properties.Quota) + } + + headers, err := s.fsc.setResourceHeaders(s.buildPath(), compProperties, resourceShare, map[string]string{ + "x-ms-share-quota": strconv.Itoa(s.Properties.Quota), + }) + if err != nil { + return err + } + + s.updateEtagAndLastModified(headers) + return nil +} + +// updates Etag and last modified date +func (s *Share) updateEtagAndLastModified(headers http.Header) { + s.Properties.Etag = headers.Get("Etag") + s.Properties.LastModified = headers.Get("Last-Modified") +} + +// updates quota value +func (s *Share) updateQuota(headers http.Header) { + quota, err := strconv.Atoi(headers.Get("x-ms-share-quota")) + if err == nil { + s.Properties.Quota = quota + } +} + +// URL gets the canonical URL to this share. This method does not create a publicly accessible +// URL if the share is private and this method does not check if the share exists. +func (s *Share) URL() string { + return s.fsc.client.getEndpoint(fileServiceName, s.buildPath(), url.Values{}) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go new file mode 100644 index 00000000..bee1c31a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go @@ -0,0 +1,47 @@ +package storage + +import ( + "strings" + "time" +) + +// AccessPolicyDetailsXML has specifics about an access policy +// annotated with XML details. +type AccessPolicyDetailsXML struct { + StartTime time.Time `xml:"Start"` + ExpiryTime time.Time `xml:"Expiry"` + Permission string `xml:"Permission"` +} + +// SignedIdentifier is a wrapper for a specific policy +type SignedIdentifier struct { + ID string `xml:"Id"` + AccessPolicy AccessPolicyDetailsXML `xml:"AccessPolicy"` +} + +// SignedIdentifiers part of the response from GetPermissions call. +type SignedIdentifiers struct { + SignedIdentifiers []SignedIdentifier `xml:"SignedIdentifier"` +} + +// AccessPolicy is the response type from the GetPermissions call. +type AccessPolicy struct { + SignedIdentifiersList SignedIdentifiers `xml:"SignedIdentifiers"` +} + +// convertAccessPolicyToXMLStructs converts between AccessPolicyDetails which is a struct better for API usage to the +// AccessPolicy struct which will get converted to XML. 
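
To make the ACL payload concrete, a stand-alone sketch (struct definitions copied from storagepolicy.go above; not part of the patch) that prints the XML a single signed identifier marshals to:

```
package main

import (
	"encoding/xml"
	"fmt"
	"log"
	"time"
)

type AccessPolicyDetailsXML struct {
	StartTime  time.Time `xml:"Start"`
	ExpiryTime time.Time `xml:"Expiry"`
	Permission string    `xml:"Permission"`
}

type SignedIdentifier struct {
	ID           string                 `xml:"Id"`
	AccessPolicy AccessPolicyDetailsXML `xml:"AccessPolicy"`
}

type SignedIdentifiers struct {
	SignedIdentifiers []SignedIdentifier `xml:"SignedIdentifier"`
}

func main() {
	start := time.Date(2017, 1, 1, 0, 0, 0, 0, time.UTC)
	sil := SignedIdentifiers{SignedIdentifiers: []SignedIdentifier{{
		ID: "policy-1",
		AccessPolicy: AccessPolicyDetailsXML{
			StartTime:  start,
			ExpiryTime: start.AddDate(0, 1, 0),
			Permission: "rd",
		},
	}}}
	out, err := xml.MarshalIndent(sil, "", "  ")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out)) // <SignedIdentifiers><SignedIdentifier>...
}
```
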
+func convertAccessPolicyToXMLStructs(id string, startTime time.Time, expiryTime time.Time, permissions string) SignedIdentifier { + return SignedIdentifier{ + ID: id, + AccessPolicy: AccessPolicyDetailsXML{ + StartTime: startTime.UTC().Round(time.Second), + ExpiryTime: expiryTime.UTC().Round(time.Second), + Permission: permissions, + }, + } +} + +func updatePermissions(permissions, permission string) bool { + return strings.Contains(permissions, permission) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go new file mode 100644 index 00000000..817560b7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go @@ -0,0 +1,118 @@ +package storage + +import ( + "fmt" + "net/http" + "net/url" +) + +// ServiceProperties represents the storage account service properties +type ServiceProperties struct { + Logging *Logging + HourMetrics *Metrics + MinuteMetrics *Metrics + Cors *Cors +} + +// Logging represents the Azure Analytics Logging settings +type Logging struct { + Version string + Delete bool + Read bool + Write bool + RetentionPolicy *RetentionPolicy +} + +// RetentionPolicy indicates if retention is enabled and for how many days +type RetentionPolicy struct { + Enabled bool + Days *int +} + +// Metrics provide request statistics. +type Metrics struct { + Version string + Enabled bool + IncludeAPIs *bool + RetentionPolicy *RetentionPolicy +} + +// Cors includes all the CORS rules +type Cors struct { + CorsRule []CorsRule +} + +// CorsRule includes all settings for a Cors rule +type CorsRule struct { + AllowedOrigins string + AllowedMethods string + MaxAgeInSeconds int + ExposedHeaders string + AllowedHeaders string +} + +func (c Client) getServiceProperties(service string, auth authentication) (*ServiceProperties, error) { + query := url.Values{ + "restype": {"service"}, + "comp": {"properties"}, + } + uri := c.getEndpoint(service, "", query) + headers := c.getStandardHeaders() + + resp, err := c.exec(http.MethodGet, uri, headers, nil, auth) + if err != nil { + return nil, err + } + defer resp.body.Close() + + if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return nil, err + } + + var out ServiceProperties + err = xmlUnmarshal(resp.body, &out) + if err != nil { + return nil, err + } + + return &out, nil +} + +func (c Client) setServiceProperties(props ServiceProperties, service string, auth authentication) error { + query := url.Values{ + "restype": {"service"}, + "comp": {"properties"}, + } + uri := c.getEndpoint(service, "", query) + + // Ideally, StorageServiceProperties would be the output struct + // This is to avoid golint stuttering, while generating the correct XML + type StorageServiceProperties struct { + Logging *Logging + HourMetrics *Metrics + MinuteMetrics *Metrics + Cors *Cors + } + input := StorageServiceProperties{ + Logging: props.Logging, + HourMetrics: props.HourMetrics, + MinuteMetrics: props.MinuteMetrics, + Cors: props.Cors, + } + + body, length, err := xmlMarshal(input) + if err != nil { + return err + } + + headers := c.getStandardHeaders() + headers["Content-Length"] = fmt.Sprintf("%v", length) + + resp, err := c.exec(http.MethodPut, uri, headers, body, auth) + if err != nil { + return err + } + defer readAndCloseBody(resp.body) + + return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go 
b/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go index 39e99750..4123746e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go @@ -4,16 +4,14 @@ import ( "bytes" "encoding/json" "fmt" + "io" + "io/ioutil" "net/http" "net/url" + "strconv" + "time" ) -// TableServiceClient contains operations for Microsoft Azure Table Storage -// Service. -type TableServiceClient struct { - client Client -} - // AzureTable is the typedef of the Azure Table name type AzureTable string @@ -25,6 +23,17 @@ type createTableRequest struct { TableName string `json:"TableName"` } +// TableAccessPolicy are used for SETTING table policies +type TableAccessPolicy struct { + ID string + StartTime time.Time + ExpiryTime time.Time + CanRead bool + CanAppend bool + CanUpdate bool + CanDelete bool +} + func pathForTable(table AzureTable) string { return fmt.Sprintf("%s", table) } func (c *TableServiceClient) getStandardHeaders() map[string]string { @@ -34,6 +43,7 @@ func (c *TableServiceClient) getStandardHeaders() map[string]string { "Accept": "application/json;odata=nometadata", "Accept-Charset": "UTF-8", "Content-Type": "application/json", + userAgentHeader: c.client.userAgent, } } @@ -45,18 +55,21 @@ func (c *TableServiceClient) QueryTables() ([]AzureTable, error) { headers := c.getStandardHeaders() headers["Content-Length"] = "0" - resp, err := c.client.execTable("GET", uri, headers, nil) + resp, err := c.client.execInternalJSON(http.MethodGet, uri, headers, nil, c.auth) if err != nil { return nil, err } defer resp.body.Close() if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + ioutil.ReadAll(resp.body) return nil, err } buf := new(bytes.Buffer) - buf.ReadFrom(resp.body) + if _, err := buf.ReadFrom(resp.body); err != nil { + return nil, err + } var respArray queryTablesResponse if err := json.Unmarshal(buf.Bytes(), &respArray); err != nil { @@ -88,12 +101,12 @@ func (c *TableServiceClient) CreateTable(table AzureTable) error { headers["Content-Length"] = fmt.Sprintf("%d", buf.Len()) - resp, err := c.client.execTable("POST", uri, headers, buf) + resp, err := c.client.execInternalJSON(http.MethodPost, uri, headers, buf, c.auth) if err != nil { return err } - defer resp.body.Close() + defer readAndCloseBody(resp.body) if err := checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil { return err @@ -114,12 +127,12 @@ func (c *TableServiceClient) DeleteTable(table AzureTable) error { headers["Content-Length"] = "0" - resp, err := c.client.execTable("DELETE", uri, headers, nil) + resp, err := c.client.execInternalJSON(http.MethodDelete, uri, headers, nil, c.auth) if err != nil { return err } - defer resp.body.Close() + defer readAndCloseBody(resp.body) if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { return err @@ -127,3 +140,115 @@ func (c *TableServiceClient) DeleteTable(table AzureTable) error { } return nil } + +// SetTablePermissions sets up table ACL permissions as per REST details https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Table-ACL +func (c *TableServiceClient) SetTablePermissions(table AzureTable, policies []TableAccessPolicy, timeout uint) (err error) { + params := url.Values{"comp": {"acl"}} + + if timeout > 0 { + params.Add("timeout", fmt.Sprint(timeout)) + } + + uri := c.client.getEndpoint(tableServiceName, string(table), params) + headers := c.client.getStandardHeaders() + + body, length, err := 
generateTableACLPayload(policies) + if err != nil { + return err + } + headers["Content-Length"] = fmt.Sprintf("%v", length) + + resp, err := c.client.execInternalJSON(http.MethodPut, uri, headers, body, c.auth) + if err != nil { + return err + } + defer readAndCloseBody(resp.body) + + if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { + return err + } + return nil +} + +func generateTableACLPayload(policies []TableAccessPolicy) (io.Reader, int, error) { + sil := SignedIdentifiers{ + SignedIdentifiers: []SignedIdentifier{}, + } + for _, tap := range policies { + permission := generateTablePermissions(&tap) + signedIdentifier := convertAccessPolicyToXMLStructs(tap.ID, tap.StartTime, tap.ExpiryTime, permission) + sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier) + } + return xmlMarshal(sil) +} + +// GetTablePermissions gets the table ACL permissions, as per REST details https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-table-acl +func (c *TableServiceClient) GetTablePermissions(table AzureTable, timeout int) (permissionResponse []TableAccessPolicy, err error) { + params := url.Values{"comp": {"acl"}} + + if timeout > 0 { + params.Add("timeout", strconv.Itoa(timeout)) + } + + uri := c.client.getEndpoint(tableServiceName, string(table), params) + headers := c.client.getStandardHeaders() + resp, err := c.client.execInternalJSON(http.MethodGet, uri, headers, nil, c.auth) + if err != nil { + return nil, err + } + defer resp.body.Close() + + if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + ioutil.ReadAll(resp.body) + return nil, err + } + + var ap AccessPolicy + err = xmlUnmarshal(resp.body, &ap.SignedIdentifiersList) + if err != nil { + return nil, err + } + out := updateTableAccessPolicy(ap) + return out, nil +} + +func updateTableAccessPolicy(ap AccessPolicy) []TableAccessPolicy { + out := []TableAccessPolicy{} + for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers { + tap := TableAccessPolicy{ + ID: policy.ID, + StartTime: policy.AccessPolicy.StartTime, + ExpiryTime: policy.AccessPolicy.ExpiryTime, + } + tap.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r") + tap.CanAppend = updatePermissions(policy.AccessPolicy.Permission, "a") + tap.CanUpdate = updatePermissions(policy.AccessPolicy.Permission, "u") + tap.CanDelete = updatePermissions(policy.AccessPolicy.Permission, "d") + + out = append(out, tap) + } + return out +} + +func generateTablePermissions(tap *TableAccessPolicy) (permissions string) { + // generate the permissions string (raud). + // still want the end user API to have bool flags. 
+ permissions = "" + + if tap.CanRead { + permissions += "r" + } + + if tap.CanAppend { + permissions += "a" + } + + if tap.CanUpdate { + permissions += "u" + } + + if tap.CanDelete { + permissions += "d" + } + return permissions +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/table_entities.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/table_entities.go index a26d9c6f..1758d9f3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/table_entities.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/table_entities.go @@ -98,7 +98,7 @@ func (c *TableServiceClient) QueryTableEntities(tableName AzureTable, previousCo headers["Content-Length"] = "0" - resp, err := c.client.execTable("GET", uri, headers, nil) + resp, err := c.client.execInternalJSON(http.MethodGet, uri, headers, nil, c.auth) if err != nil { return nil, nil, err @@ -106,12 +106,9 @@ func (c *TableServiceClient) QueryTableEntities(tableName AzureTable, previousCo contToken := extractContinuationTokenFromHeaders(resp.headers) - if err != nil { - return nil, contToken, err - } defer resp.body.Close() - if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { return nil, contToken, err } @@ -127,13 +124,11 @@ func (c *TableServiceClient) QueryTableEntities(tableName AzureTable, previousCo // The function fails if there is an entity with the same // PartitionKey and RowKey in the table. func (c *TableServiceClient) InsertEntity(table AzureTable, entity TableEntity) error { - var err error - - if sc, err := c.execTable(table, entity, false, "POST"); err != nil { + if sc, err := c.execTable(table, entity, false, http.MethodPost); err != nil { return checkRespCode(sc, []int{http.StatusCreated}) } - return err + return nil } func (c *TableServiceClient) execTable(table AzureTable, entity TableEntity, specifyKeysInURL bool, method string) (int, error) { @@ -152,10 +147,7 @@ func (c *TableServiceClient) execTable(table AzureTable, entity TableEntity, spe headers["Content-Length"] = fmt.Sprintf("%d", buf.Len()) - var err error - var resp *odataResponse - - resp, err = c.client.execTable(method, uri, headers, &buf) + resp, err := c.client.execInternalJSON(method, uri, headers, &buf, c.auth) if err != nil { return 0, err @@ -170,12 +162,10 @@ func (c *TableServiceClient) execTable(table AzureTable, entity TableEntity, spe // one passed as parameter. The function fails if there is no entity // with the same PartitionKey and RowKey in the table. func (c *TableServiceClient) UpdateEntity(table AzureTable, entity TableEntity) error { - var err error - - if sc, err := c.execTable(table, entity, true, "PUT"); err != nil { + if sc, err := c.execTable(table, entity, true, http.MethodPut); err != nil { return checkRespCode(sc, []int{http.StatusNoContent}) } - return err + return nil } // MergeEntity merges the contents of an entity with the @@ -183,12 +173,10 @@ func (c *TableServiceClient) UpdateEntity(table AzureTable, entity TableEntity) // The function fails if there is no entity // with the same PartitionKey and RowKey in the table. 
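
A self-contained sketch of the "raud" encoding produced by `generateTablePermissions` and decoded via `updatePermissions` above; the local struct stands in for `TableAccessPolicy` and the round trip is illustrative only:

```
package main

import (
	"fmt"
	"strings"
)

type tableAccessPolicy struct {
	CanRead, CanAppend, CanUpdate, CanDelete bool
}

// encode mirrors generateTablePermissions: each enabled flag contributes
// one character, always in "raud" order.
func encode(p tableAccessPolicy) string {
	permissions := ""
	if p.CanRead {
		permissions += "r"
	}
	if p.CanAppend {
		permissions += "a"
	}
	if p.CanUpdate {
		permissions += "u"
	}
	if p.CanDelete {
		permissions += "d"
	}
	return permissions
}

// decode mirrors updateTableAccessPolicy's use of updatePermissions.
func decode(s string) tableAccessPolicy {
	return tableAccessPolicy{
		CanRead:   strings.Contains(s, "r"),
		CanAppend: strings.Contains(s, "a"),
		CanUpdate: strings.Contains(s, "u"),
		CanDelete: strings.Contains(s, "d"),
	}
}

func main() {
	p := tableAccessPolicy{CanRead: true, CanDelete: true}
	fmt.Println(encode(p))         // "rd"
	fmt.Println(decode(encode(p))) // round-trips to the same flags
}
```
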
func (c *TableServiceClient) MergeEntity(table AzureTable, entity TableEntity) error { - var err error - if sc, err := c.execTable(table, entity, true, "MERGE"); err != nil { return checkRespCode(sc, []int{http.StatusNoContent}) } - return err + return nil } // DeleteEntityWithoutCheck deletes the entity matching by @@ -214,7 +202,7 @@ func (c *TableServiceClient) DeleteEntity(table AzureTable, entity TableEntity, headers["Content-Length"] = "0" headers["If-Match"] = ifMatch - resp, err := c.client.execTable("DELETE", uri, headers, nil) + resp, err := c.client.execInternalJSON(http.MethodDelete, uri, headers, nil, c.auth) if err != nil { return err @@ -231,23 +219,19 @@ func (c *TableServiceClient) DeleteEntity(table AzureTable, entity TableEntity, // InsertOrReplaceEntity inserts an entity in the specified table // or replaced the existing one. func (c *TableServiceClient) InsertOrReplaceEntity(table AzureTable, entity TableEntity) error { - var err error - - if sc, err := c.execTable(table, entity, true, "PUT"); err != nil { + if sc, err := c.execTable(table, entity, true, http.MethodPut); err != nil { return checkRespCode(sc, []int{http.StatusNoContent}) } - return err + return nil } // InsertOrMergeEntity inserts an entity in the specified table // or merges the existing one. func (c *TableServiceClient) InsertOrMergeEntity(table AzureTable, entity TableEntity) error { - var err error - if sc, err := c.execTable(table, entity, true, "MERGE"); err != nil { return checkRespCode(sc, []int{http.StatusNoContent}) } - return err + return nil } func injectPartitionAndRowKeys(entity TableEntity, buf *bytes.Buffer) error { @@ -340,8 +324,12 @@ func deserializeEntity(retType reflect.Type, reader io.Reader) ([]TableEntity, e } // Reset PartitionKey and RowKey - tEntries[i].SetPartitionKey(pKey) - tEntries[i].SetRowKey(rKey) + if err := tEntries[i].SetPartitionKey(pKey); err != nil { + return nil, err + } + if err := tEntries[i].SetRowKey(rKey); err != nil { + return nil, err + } } return tEntries, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go new file mode 100644 index 00000000..ee5e0a86 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go @@ -0,0 +1,20 @@ +package storage + +// TableServiceClient contains operations for Microsoft Azure Table Storage +// Service. +type TableServiceClient struct { + client Client + auth authentication +} + +// GetServiceProperties gets the properties of your storage account's table service. +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-table-service-properties +func (c *TableServiceClient) GetServiceProperties() (*ServiceProperties, error) { + return c.client.getServiceProperties(tableServiceName, c.auth) +} + +// SetServiceProperties sets the properties of your storage account's table service. 
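
A hedged usage sketch for the new service-properties plumbing. `NewBasicClient` and `GetTableService` are pre-existing constructors in this package; the retention settings and credentials below are illustrative:

```
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	cli, err := storage.NewBasicClient("myaccount", "bXlrZXk=")
	if err != nil {
		log.Fatal(err)
	}
	ts := cli.GetTableService()

	props, err := ts.GetServiceProperties()
	if err != nil {
		log.Fatal(err)
	}

	// Enable hour metrics with a 7-day retention window, then push the
	// modified properties back in one call.
	days := 7
	props.HourMetrics = &storage.Metrics{
		Version: "1.0",
		Enabled: true,
		RetentionPolicy: &storage.RetentionPolicy{
			Enabled: true,
			Days:    &days,
		},
	}
	if err := ts.SetServiceProperties(*props); err != nil {
		log.Fatal(err)
	}
}
```
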
+// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-table-service-properties +func (c *TableServiceClient) SetServiceProperties(props ServiceProperties) error { + return c.client.setServiceProperties(props, tableServiceName, c.auth) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/version.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/version.go new file mode 100644 index 00000000..c25fe337 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/version.go @@ -0,0 +1,5 @@ +package storage + +var ( + sdkVersion = "0.1.0" +) diff --git a/vendor/github.com/Azure/go-autorest/LICENSE b/vendor/github.com/Azure/go-autorest/LICENSE new file mode 100644 index 00000000..b9d6a27e --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/README.md b/vendor/github.com/Azure/go-autorest/README.md new file mode 100644 index 00000000..f4c34d0e --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/README.md @@ -0,0 +1,132 @@ +# go-autorest + +[![GoDoc](https://godoc.org/github.com/Azure/go-autorest/autorest?status.png)](https://godoc.org/github.com/Azure/go-autorest/autorest) [![Build Status](https://travis-ci.org/Azure/go-autorest.svg?branch=master)](https://travis-ci.org/Azure/go-autorest) [![Go Report Card](https://goreportcard.com/badge/Azure/go-autorest)](https://goreportcard.com/report/Azure/go-autorest) + +## Usage +Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines +and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/) +generated Go code. + +The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, +and Responding. A typical pattern is: + +```go + req, err := Prepare(&http.Request{}, + token.WithAuthorization()) + + resp, err := Send(req, + WithLogging(logger), + DoErrorIfStatusCode(http.StatusInternalServerError), + DoCloseIfError(), + DoRetryForAttempts(5, time.Second)) + + err = Respond(resp, + ByDiscardingBody(), + ByClosing()) +``` + +Each phase relies on decorators to modify and / or manage processing. Decorators may first modify +and then pass the data along, pass the data first and then modify the result, or wrap themselves +around passing the data (such as a logger might do). Decorators run in the order provided. For +example, the following: + +```go + req, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/"), + WithPath("a"), + WithPath("b"), + WithPath("c")) +``` + +will set the URL to: + +``` + https://microsoft.com/a/b/c +``` + +Preparers and Responders may be shared and re-used (assuming the underlying decorators support +sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders +shared among multiple go-routines, and a single Sender shared among multiple sending go-routines, +all bound together by means of input / output channels. + +Decorators hold their passed state within a closure (such as the path components in the example +above). Be careful to share Preparers and Responders only in a context where such held state +applies. For example, it may not make sense to share a Preparer that applies a query string from a +fixed set of values. Similarly, sharing a Responder that reads the response body into a passed +struct (e.g., `ByUnmarshallingJson`) is likely incorrect. + +Errors raised by autorest objects and methods will conform to the `autorest.Error` interface. + +See the included examples for more detail. For details on the suggested use of this package by +generated clients, see the Client described below. 
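+
+As a rough illustration of the decorator pattern described above (a sketch for this
+document, not an API shipped by the package; it assumes only the `PrepareDecorator`,
+`Preparer`, and `PreparerFunc` types already used in the examples, with imports omitted
+as elsewhere in this README), a custom decorator is simply a function from `Preparer`
+to `Preparer` whose state lives in the closure:
+
+```go
+// WithAPIVersion is a hypothetical PrepareDecorator: the version string is held
+// in the closure, so a single instance can prepare many requests.
+func WithAPIVersion(version string) autorest.PrepareDecorator {
+	return func(p autorest.Preparer) autorest.Preparer {
+		return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err != nil {
+				return r, err
+			}
+			// Run after the wrapped Preparer so the URL has already been set
+			// (e.g., by WithBaseURL and WithPath).
+			q := r.URL.Query()
+			q.Set("api-version", version)
+			r.URL.RawQuery = q.Encode()
+			return r, nil
+		})
+	}
+}
+```
+
+Such a decorator composes with the stock ones, e.g.
+`autorest.Prepare(&http.Request{}, autorest.WithBaseURL("https://microsoft.com/"), WithAPIVersion("2017-01-01"))`.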
+ +## Helpers + +### Handling Swagger Dates + +The Swagger specification (https://swagger.io) that drives AutoRest +(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The +github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure correct +parsing and formatting. + +### Handling Empty Values + +In JSON, missing values have different semantics than empty values. This is especially true for +services using the HTTP PATCH verb. The JSON submitted with a PATCH request generally contains +only those values to modify. Missing values are to be left unchanged. Developers, then, require a +means to both specify an empty value and to leave the value out of the submitted JSON. + +The Go JSON package (`encoding/json`) supports the `omitempty` tag. When specified, it omits +empty values from the rendered JSON. Since Go defines default values for all base types (such as "" +for string and 0 for int) and provides no means to mark a value as actually empty, the JSON package +treats default values as meaning empty, omitting them from the rendered JSON. This means that, using +the Go base types encoded through the default JSON package, it is not possible to create JSON to +clear a value at the server. + +The workaround within the Go community is to use pointers to base types in lieu of base types within +structures that map to JSON. For example, instead of a value of type `string`, the workaround uses +`*string`. While this enables distinguishing empty values from those to be unchanged, creating +pointers to a base type (notably constant, in-line values) requires additional variables. This, for +example, + +```go + s := struct { + S *string + }{ S: &"foo" } +``` +fails, while, this + +```go + v := "foo" + s := struct { + S *string + }{ S: &v } +``` +succeeds. + +To ease using pointers, the subpackage `to` contains helpers that convert to and from pointers for +Go base types which have Swagger analogs. It also provides a helper that converts between +`map[string]string` and `map[string]*string`, enabling the JSON to specify that the value +associated with a key should be cleared. With the helpers, the previous example becomes + +```go + s := struct { + S *string + }{ S: to.StringPtr("foo") } +``` + +## Install + +```bash +go get github.com/Azure/go-autorest/autorest +go get github.com/Azure/go-autorest/autorest/azure +go get github.com/Azure/go-autorest/autorest/date +go get github.com/Azure/go-autorest/autorest/to +``` + +## License + +See LICENSE file. + +----- +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. diff --git a/vendor/github.com/Azure/go-autorest/autorest/autorest.go b/vendor/github.com/Azure/go-autorest/autorest/autorest.go new file mode 100644 index 00000000..51f1c4bb --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/autorest.go @@ -0,0 +1,115 @@ +/* +Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines +and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/) +generated Go code. + +The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, +and Responding. 
A typical pattern is: + + req, err := Prepare(&http.Request{}, + token.WithAuthorization()) + + resp, err := Send(req, + WithLogging(logger), + DoErrorIfStatusCode(http.StatusInternalServerError), + DoCloseIfError(), + DoRetryForAttempts(5, time.Second)) + + err = Respond(resp, + ByDiscardingBody(), + ByClosing()) + +Each phase relies on decorators to modify and / or manage processing. Decorators may first modify +and then pass the data along, pass the data first and then modify the result, or wrap themselves +around passing the data (such as a logger might do). Decorators run in the order provided. For +example, the following: + + req, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/"), + WithPath("a"), + WithPath("b"), + WithPath("c")) + +will set the URL to: + + https://microsoft.com/a/b/c + +Preparers and Responders may be shared and re-used (assuming the underlying decorators support +sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders +shared among multiple go-routines, and a single Sender shared among multiple sending go-routines, +all bound together by means of input / output channels. + +Decorators hold their passed state within a closure (such as the path components in the example +above). Be careful to share Preparers and Responders only in a context where such held state +applies. For example, it may not make sense to share a Preparer that applies a query string from a +fixed set of values. Similarly, sharing a Responder that reads the response body into a passed +struct (e.g., ByUnmarshallingJson) is likely incorrect. + +Lastly, the Swagger specification (https://swagger.io) that drives AutoRest +(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The +github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure +correct parsing and formatting. + +Errors raised by autorest objects and methods will conform to the autorest.Error interface. + +See the included examples for more detail. For details on the suggested use of this package by +generated clients, see the Client described below. +*/ +package autorest + +import ( + "net/http" + "time" +) + +const ( + // HeaderLocation specifies the HTTP Location header. + HeaderLocation = "Location" + + // HeaderRetryAfter specifies the HTTP Retry-After header. + HeaderRetryAfter = "Retry-After" +) + +// ResponseHasStatusCode returns true if the status code in the HTTP Response is in the passed set +// and false otherwise. +func ResponseHasStatusCode(resp *http.Response, codes ...int) bool { + return containsInt(codes, resp.StatusCode) +} + +// GetLocation retrieves the URL from the Location header of the passed response. +func GetLocation(resp *http.Response) string { + return resp.Header.Get(HeaderLocation) +} + +// GetRetryAfter extracts the retry delay from the Retry-After header of the passed response. If +// the header is absent or is malformed, it will return the supplied default delay time.Duration. +func GetRetryAfter(resp *http.Response, defaultDelay time.Duration) time.Duration { + retry := resp.Header.Get(HeaderRetryAfter) + if retry == "" { + return defaultDelay + } + + d, err := time.ParseDuration(retry + "s") + if err != nil { + return defaultDelay + } + + return d +} + +// NewPollingRequest allocates and returns a new http.Request to poll for the passed response. 
+func NewPollingRequest(resp *http.Response, cancel <-chan struct{}) (*http.Request, error) { + location := GetLocation(resp) + if location == "" { + return nil, NewErrorWithResponse("autorest", "NewPollingRequest", resp, "Location header missing from response that requires polling") + } + + req, err := Prepare(&http.Request{Cancel: cancel}, + AsGet(), + WithBaseURL(location)) + if err != nil { + return nil, NewErrorWithError(err, "autorest", "NewPollingRequest", nil, "Failure creating poll request to %s", location) + } + + return req, nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go new file mode 100644 index 00000000..6e076981 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go @@ -0,0 +1,308 @@ +package azure + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/date" +) + +const ( + headerAsyncOperation = "Azure-AsyncOperation" +) + +const ( + methodDelete = "DELETE" + methodPatch = "PATCH" + methodPost = "POST" + methodPut = "PUT" + methodGet = "GET" + + operationInProgress string = "InProgress" + operationCanceled string = "Canceled" + operationFailed string = "Failed" + operationSucceeded string = "Succeeded" +) + +// DoPollForAsynchronous returns a SendDecorator that polls if the http.Response is for an Azure +// long-running operation. It will delay between requests for the duration specified in the +// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by +// closing the optional channel on the http.Request. +func DoPollForAsynchronous(delay time.Duration) autorest.SendDecorator { + return func(s autorest.Sender) autorest.Sender { + return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + resp, err = s.Do(r) + if err != nil { + return resp, err + } + pollingCodes := []int{http.StatusAccepted, http.StatusCreated, http.StatusOK} + if !autorest.ResponseHasStatusCode(resp, pollingCodes...) { + return resp, nil + } + + ps := pollingState{} + for err == nil { + err = updatePollingState(resp, &ps) + if err != nil { + break + } + if ps.hasTerminated() { + if !ps.hasSucceeded() { + err = ps + } + break + } + + r, err = newPollingRequest(resp, ps) + if err != nil { + return resp, err + } + + delay = autorest.GetRetryAfter(resp, delay) + resp, err = autorest.SendWithSender(s, r, + autorest.AfterDelay(delay)) + } + + return resp, err + }) + } +} + +func getAsyncOperation(resp *http.Response) string { + return resp.Header.Get(http.CanonicalHeaderKey(headerAsyncOperation)) +} + +func hasSucceeded(state string) bool { + return state == operationSucceeded +} + +func hasTerminated(state string) bool { + switch state { + case operationCanceled, operationFailed, operationSucceeded: + return true + default: + return false + } +} + +func hasFailed(state string) bool { + return state == operationFailed +} + +type provisioningTracker interface { + state() string + hasSucceeded() bool + hasTerminated() bool +} + +type operationResource struct { + // Note: + // The specification states services should return the "id" field. However some return it as + // "operationId". 
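+	// Keeping both an "id" and an "operationId" field below means whichever key
+	// the service actually sends is populated by encoding/json.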
+	ID              string                 `json:"id"`
+	OperationID     string                 `json:"operationId"`
+	Name            string                 `json:"name"`
+	Status          string                 `json:"status"`
+	Properties      map[string]interface{} `json:"properties"`
+	OperationError  ServiceError           `json:"error"`
+	StartTime       date.Time              `json:"startTime"`
+	EndTime         date.Time              `json:"endTime"`
+	PercentComplete float64                `json:"percentComplete"`
+}
+
+func (or operationResource) state() string {
+	return or.Status
+}
+
+func (or operationResource) hasSucceeded() bool {
+	return hasSucceeded(or.state())
+}
+
+func (or operationResource) hasTerminated() bool {
+	return hasTerminated(or.state())
+}
+
+type provisioningProperties struct {
+	ProvisioningState string `json:"provisioningState"`
+}
+
+type provisioningStatus struct {
+	Properties        provisioningProperties `json:"properties,omitempty"`
+	ProvisioningError ServiceError           `json:"error,omitempty"`
+}
+
+func (ps provisioningStatus) state() string {
+	return ps.Properties.ProvisioningState
+}
+
+func (ps provisioningStatus) hasSucceeded() bool {
+	return hasSucceeded(ps.state())
+}
+
+func (ps provisioningStatus) hasTerminated() bool {
+	return hasTerminated(ps.state())
+}
+
+func (ps provisioningStatus) hasProvisioningError() bool {
+	return ps.ProvisioningError != ServiceError{}
+}
+
+type pollingResponseFormat string
+
+const (
+	usesOperationResponse  pollingResponseFormat = "OperationResponse"
+	usesProvisioningStatus pollingResponseFormat = "ProvisioningStatus"
+	formatIsUnknown        pollingResponseFormat = ""
+)
+
+type pollingState struct {
+	responseFormat pollingResponseFormat
+	uri            string
+	state          string
+	code           string
+	message        string
+}
+
+func (ps pollingState) hasSucceeded() bool {
+	return hasSucceeded(ps.state)
+}
+
+func (ps pollingState) hasTerminated() bool {
+	return hasTerminated(ps.state)
+}
+
+func (ps pollingState) hasFailed() bool {
+	return hasFailed(ps.state)
+}
+
+func (ps pollingState) Error() string {
+	return fmt.Sprintf("Long running operation terminated with status '%s': Code=%q Message=%q", ps.state, ps.code, ps.message)
+}
+
+// updatePollingState maps the operation status -- retrieved from either a provisioningState
+// field or the status field of an OperationResource, or inferred from the HTTP status code --
+// into well-known states. Since the process begins from the initial request, the state
+// always either comes from the returned provisioningState or is inferred from the HTTP
+// status code. Subsequent requests will read an Azure OperationResource object if the
+// service initially returned the Azure-AsyncOperation header. The responseFormat field notes
+// the expected response format.
+func updatePollingState(resp *http.Response, ps *pollingState) error {
+	// Determine the response shape
+	// -- The first response will always be a provisioningStatus response; only the polling requests,
+	//    depending on the header returned, may be something else.
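+	// pt is the unmarshal target below; choosing its concrete type here selects
+	// which of the two JSON shapes gets decoded.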
+ var pt provisioningTracker + if ps.responseFormat == usesOperationResponse { + pt = &operationResource{} + } else { + pt = &provisioningStatus{} + } + + // If this is the first request (that is, the polling response shape is unknown), determine how + // to poll and what to expect + if ps.responseFormat == formatIsUnknown { + req := resp.Request + if req == nil { + return autorest.NewError("azure", "updatePollingState", "Azure Polling Error - Original HTTP request is missing") + } + + // Prefer the Azure-AsyncOperation header + ps.uri = getAsyncOperation(resp) + if ps.uri != "" { + ps.responseFormat = usesOperationResponse + } else { + ps.responseFormat = usesProvisioningStatus + } + + // Else, use the Location header + if ps.uri == "" { + ps.uri = autorest.GetLocation(resp) + } + + // Lastly, requests against an existing resource, use the last request URI + if ps.uri == "" { + m := strings.ToUpper(req.Method) + if m == methodPatch || m == methodPut || m == methodGet { + ps.uri = req.URL.String() + } + } + } + + // Read and interpret the response (saving the Body in case no polling is necessary) + b := &bytes.Buffer{} + err := autorest.Respond(resp, + autorest.ByCopying(b), + autorest.ByUnmarshallingJSON(pt), + autorest.ByClosing()) + resp.Body = ioutil.NopCloser(b) + if err != nil { + return err + } + + // Interpret the results + // -- Terminal states apply regardless + // -- Unknown states are per-service inprogress states + // -- Otherwise, infer state from HTTP status code + if pt.hasTerminated() { + ps.state = pt.state() + } else if pt.state() != "" { + ps.state = operationInProgress + } else { + switch resp.StatusCode { + case http.StatusAccepted: + ps.state = operationInProgress + + case http.StatusNoContent, http.StatusCreated, http.StatusOK: + ps.state = operationSucceeded + + default: + ps.state = operationFailed + } + } + + if ps.state == operationInProgress && ps.uri == "" { + return autorest.NewError("azure", "updatePollingState", "Azure Polling Error - Unable to obtain polling URI for %s %s", resp.Request.Method, resp.Request.URL) + } + + // For failed operation, check for error code and message in + // -- Operation resource + // -- Response + // -- Otherwise, Unknown + if ps.hasFailed() { + if ps.responseFormat == usesOperationResponse { + or := pt.(*operationResource) + ps.code = or.OperationError.Code + ps.message = or.OperationError.Message + } else { + p := pt.(*provisioningStatus) + if p.hasProvisioningError() { + ps.code = p.ProvisioningError.Code + ps.message = p.ProvisioningError.Message + } else { + ps.code = "Unknown" + ps.message = "None" + } + } + } + return nil +} + +func newPollingRequest(resp *http.Response, ps pollingState) (*http.Request, error) { + req := resp.Request + if req == nil { + return nil, autorest.NewError("azure", "newPollingRequest", "Azure Polling Error - Original HTTP request is missing") + } + + reqPoll, err := autorest.Prepare(&http.Request{Cancel: req.Cancel}, + autorest.AsGet(), + autorest.WithBaseURL(ps.uri)) + if err != nil { + return nil, autorest.NewErrorWithError(err, "azure", "newPollingRequest", nil, "Failure creating poll request to %s", ps.uri) + } + + return reqPoll, nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go new file mode 100644 index 00000000..3f4d1342 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go @@ -0,0 +1,180 @@ +/* +Package azure provides Azure-specific implementations used with AutoRest. 
+ +See the included examples for more detail. +*/ +package azure + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strconv" + + "github.com/Azure/go-autorest/autorest" +) + +const ( + // HeaderClientID is the Azure extension header to set a user-specified request ID. + HeaderClientID = "x-ms-client-request-id" + + // HeaderReturnClientID is the Azure extension header to set if the user-specified request ID + // should be included in the response. + HeaderReturnClientID = "x-ms-return-client-request-id" + + // HeaderRequestID is the Azure extension header of the service generated request ID returned + // in the response. + HeaderRequestID = "x-ms-request-id" +) + +// ServiceError encapsulates the error response from an Azure service. +type ServiceError struct { + Code string `json:"code"` + Message string `json:"message"` + Details *[]interface{} `json:"details"` +} + +func (se ServiceError) Error() string { + if se.Details != nil { + d, err := json.Marshal(*(se.Details)) + if err != nil { + return fmt.Sprintf("Code=%q Message=%q Details=%v", se.Code, se.Message, *se.Details) + } + return fmt.Sprintf("Code=%q Message=%q Details=%v", se.Code, se.Message, string(d)) + } + return fmt.Sprintf("Code=%q Message=%q", se.Code, se.Message) +} + +// RequestError describes an error response returned by Azure service. +type RequestError struct { + autorest.DetailedError + + // The error returned by the Azure service. + ServiceError *ServiceError `json:"error"` + + // The request id (from the x-ms-request-id-header) of the request. + RequestID string +} + +// Error returns a human-friendly error message from service error. +func (e RequestError) Error() string { + return fmt.Sprintf("autorest/azure: Service returned an error. Status=%v %v", + e.StatusCode, e.ServiceError) +} + +// IsAzureError returns true if the passed error is an Azure Service error; false otherwise. +func IsAzureError(e error) bool { + _, ok := e.(*RequestError) + return ok +} + +// NewErrorWithError creates a new Error conforming object from the +// passed packageType, method, statusCode of the given resp (UndefinedStatusCode +// if resp is nil), message, and original error. message is treated as a format +// string to which the optional args apply. +func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) RequestError { + if v, ok := original.(*RequestError); ok { + return *v + } + + statusCode := autorest.UndefinedStatusCode + if resp != nil { + statusCode = resp.StatusCode + } + return RequestError{ + DetailedError: autorest.DetailedError{ + Original: original, + PackageType: packageType, + Method: method, + StatusCode: statusCode, + Message: fmt.Sprintf(message, args...), + }, + } +} + +// WithReturningClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g., +// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). It also sets the x-ms-return-client-request-id +// header to true such that UUID accompanies the http.Response. 
+func WithReturningClientID(uuid string) autorest.PrepareDecorator { + preparer := autorest.CreatePreparer( + WithClientID(uuid), + WithReturnClientID(true)) + + return func(p autorest.Preparer) autorest.Preparer { + return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err != nil { + return r, err + } + return preparer.Prepare(r) + }) + } +} + +// WithClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-client-request-id whose value is passed, undecorated UUID (e.g., +// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). +func WithClientID(uuid string) autorest.PrepareDecorator { + return autorest.WithHeader(HeaderClientID, uuid) +} + +// WithReturnClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-return-client-request-id whose boolean value indicates if the value of the +// x-ms-client-request-id header should be included in the http.Response. +func WithReturnClientID(b bool) autorest.PrepareDecorator { + return autorest.WithHeader(HeaderReturnClientID, strconv.FormatBool(b)) +} + +// ExtractClientID extracts the client identifier from the x-ms-client-request-id header set on the +// http.Request sent to the service (and returned in the http.Response) +func ExtractClientID(resp *http.Response) string { + return autorest.ExtractHeaderValue(HeaderClientID, resp) +} + +// ExtractRequestID extracts the Azure server generated request identifier from the +// x-ms-request-id header. +func ExtractRequestID(resp *http.Response) string { + return autorest.ExtractHeaderValue(HeaderRequestID, resp) +} + +// WithErrorUnlessStatusCode returns a RespondDecorator that emits an +// azure.RequestError by reading the response body unless the response HTTP status code +// is among the set passed. +// +// If there is a chance service may return responses other than the Azure error +// format and the response cannot be parsed into an error, a decoding error will +// be returned containing the response body. In any case, the Responder will +// return an error if the status code is not satisfied. +// +// If this Responder returns an error, the response body will be replaced with +// an in-memory reader, which needs no further closing. +func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator { + return func(r autorest.Responder) autorest.Responder { + return autorest.ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil && !autorest.ResponseHasStatusCode(resp, codes...) { + var e RequestError + defer resp.Body.Close() + + // Copy and replace the Body in case it does not contain an error object. + // This will leave the Body available to the caller. 
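+			// (CopyAndDecode hands back the raw bytes it read, so the NopCloser
+			// rewrap below works whether or not the decode into e succeeded.)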
+ b, decodeErr := autorest.CopyAndDecode(autorest.EncodedAsJSON, resp.Body, &e) + resp.Body = ioutil.NopCloser(&b) + if decodeErr != nil { + return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), decodeErr) + } else if e.ServiceError == nil { + e.ServiceError = &ServiceError{Code: "Unknown", Message: "Unknown service error"} + } + + e.RequestID = ExtractRequestID(resp) + if e.StatusCode == nil { + e.StatusCode = resp.StatusCode + } + err = &e + } + return err + }) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/config.go b/vendor/github.com/Azure/go-autorest/autorest/azure/config.go new file mode 100644 index 00000000..bea30b0d --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/config.go @@ -0,0 +1,13 @@ +package azure + +import ( + "net/url" +) + +// OAuthConfig represents the endpoints needed +// in OAuth operations +type OAuthConfig struct { + AuthorizeEndpoint url.URL + TokenEndpoint url.URL + DeviceCodeEndpoint url.URL +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/devicetoken.go b/vendor/github.com/Azure/go-autorest/autorest/azure/devicetoken.go new file mode 100644 index 00000000..e1d5498a --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/devicetoken.go @@ -0,0 +1,193 @@ +package azure + +/* + This file is largely based on rjw57/oauth2device's code, with the follow differences: + * scope -> resource, and only allow a single one + * receive "Message" in the DeviceCode struct and show it to users as the prompt + * azure-xplat-cli has the following behavior that this emulates: + - does not send client_secret during the token exchange + - sends resource again in the token exchange request +*/ + +import ( + "fmt" + "net/http" + "net/url" + "time" + + "github.com/Azure/go-autorest/autorest" +) + +const ( + logPrefix = "autorest/azure/devicetoken:" +) + +var ( + // ErrDeviceGeneric represents an unknown error from the token endpoint when using device flow + ErrDeviceGeneric = fmt.Errorf("%s Error while retrieving OAuth token: Unknown Error", logPrefix) + + // ErrDeviceAccessDenied represents an access denied error from the token endpoint when using device flow + ErrDeviceAccessDenied = fmt.Errorf("%s Error while retrieving OAuth token: Access Denied", logPrefix) + + // ErrDeviceAuthorizationPending represents the server waiting on the user to complete the device flow + ErrDeviceAuthorizationPending = fmt.Errorf("%s Error while retrieving OAuth token: Authorization Pending", logPrefix) + + // ErrDeviceCodeExpired represents the server timing out and expiring the code during device flow + ErrDeviceCodeExpired = fmt.Errorf("%s Error while retrieving OAuth token: Code Expired", logPrefix) + + // ErrDeviceSlowDown represents the service telling us we're polling too often during device flow + ErrDeviceSlowDown = fmt.Errorf("%s Error while retrieving OAuth token: Slow Down", logPrefix) + + errCodeSendingFails = "Error occurred while sending request for Device Authorization Code" + errCodeHandlingFails = "Error occurred while handling response from the Device Endpoint" + errTokenSendingFails = "Error occurred while sending request with device code for a token" + errTokenHandlingFails = "Error occurred while handling response from the Token Endpoint (during device flow)" +) + +// DeviceCode is the object returned by the device auth endpoint +// It contains information to instruct the user to complete the auth flow +type DeviceCode struct { + DeviceCode *string 
`json:"device_code,omitempty"` + UserCode *string `json:"user_code,omitempty"` + VerificationURL *string `json:"verification_url,omitempty"` + ExpiresIn *int64 `json:"expires_in,string,omitempty"` + Interval *int64 `json:"interval,string,omitempty"` + + Message *string `json:"message"` // Azure specific + Resource string // store the following, stored when initiating, used when exchanging + OAuthConfig OAuthConfig + ClientID string +} + +// TokenError is the object returned by the token exchange endpoint +// when something is amiss +type TokenError struct { + Error *string `json:"error,omitempty"` + ErrorCodes []int `json:"error_codes,omitempty"` + ErrorDescription *string `json:"error_description,omitempty"` + Timestamp *string `json:"timestamp,omitempty"` + TraceID *string `json:"trace_id,omitempty"` +} + +// DeviceToken is the object return by the token exchange endpoint +// It can either look like a Token or an ErrorToken, so put both here +// and check for presence of "Error" to know if we are in error state +type deviceToken struct { + Token + TokenError +} + +// InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode +// that can be used with CheckForUserCompletion or WaitForUserCompletion. +func InitiateDeviceAuth(client *autorest.Client, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) { + req, _ := autorest.Prepare( + &http.Request{}, + autorest.AsPost(), + autorest.AsFormURLEncoded(), + autorest.WithBaseURL(oauthConfig.DeviceCodeEndpoint.String()), + autorest.WithFormData(url.Values{ + "client_id": []string{clientID}, + "resource": []string{resource}, + }), + ) + + resp, err := autorest.SendWithSender(client, req) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err) + } + + var code DeviceCode + err = autorest.Respond( + resp, + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&code), + autorest.ByClosing()) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err) + } + + code.ClientID = clientID + code.Resource = resource + code.OAuthConfig = oauthConfig + + return &code, nil +} + +// CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint +// to see if the device flow has: been completed, timed out, or otherwise failed +func CheckForUserCompletion(client *autorest.Client, code *DeviceCode) (*Token, error) { + req, _ := autorest.Prepare( + &http.Request{}, + autorest.AsPost(), + autorest.AsFormURLEncoded(), + autorest.WithBaseURL(code.OAuthConfig.TokenEndpoint.String()), + autorest.WithFormData(url.Values{ + "client_id": []string{code.ClientID}, + "code": []string{*code.DeviceCode}, + "grant_type": []string{OAuthGrantTypeDeviceCode}, + "resource": []string{code.Resource}, + }), + ) + + resp, err := autorest.SendWithSender(client, req) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err) + } + + var token deviceToken + err = autorest.Respond( + resp, + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusBadRequest), + autorest.ByUnmarshallingJSON(&token), + autorest.ByClosing()) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err) + } + + if token.Error == nil { + return &token.Token, nil + } + + switch *token.Error { + case "authorization_pending": + return nil, ErrDeviceAuthorizationPending + case "slow_down": + return nil, ErrDeviceSlowDown + case "access_denied": + return nil, ErrDeviceAccessDenied + case 
"code_expired": + return nil, ErrDeviceCodeExpired + default: + return nil, ErrDeviceGeneric + } +} + +// WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs. +// This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'. +func WaitForUserCompletion(client *autorest.Client, code *DeviceCode) (*Token, error) { + intervalDuration := time.Duration(*code.Interval) * time.Second + waitDuration := intervalDuration + + for { + token, err := CheckForUserCompletion(client, code) + + if err == nil { + return token, nil + } + + switch err { + case ErrDeviceSlowDown: + waitDuration += waitDuration + case ErrDeviceAuthorizationPending: + // noop + default: // everything else is "fatal" to us + return nil, err + } + + if waitDuration > (intervalDuration * 3) { + return nil, fmt.Errorf("%s Error waiting for user to complete device flow. Server told us to slow_down too much", logPrefix) + } + + time.Sleep(waitDuration) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go new file mode 100644 index 00000000..4701b437 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go @@ -0,0 +1,167 @@ +package azure + +import ( + "fmt" + "net/url" + "strings" +) + +const ( + activeDirectoryAPIVersion = "1.0" +) + +var environments = map[string]Environment{ + "AZURECHINACLOUD": ChinaCloud, + "AZUREGERMANCLOUD": GermanCloud, + "AZUREPUBLICCLOUD": PublicCloud, + "AZUREUSGOVERNMENTCLOUD": USGovernmentCloud, +} + +// Environment represents a set of endpoints for each of Azure's Clouds. +type Environment struct { + Name string `json:"name"` + ManagementPortalURL string `json:"managementPortalURL"` + PublishSettingsURL string `json:"publishSettingsURL"` + ServiceManagementEndpoint string `json:"serviceManagementEndpoint"` + ResourceManagerEndpoint string `json:"resourceManagerEndpoint"` + ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"` + GalleryEndpoint string `json:"galleryEndpoint"` + KeyVaultEndpoint string `json:"keyVaultEndpoint"` + GraphEndpoint string `json:"graphEndpoint"` + StorageEndpointSuffix string `json:"storageEndpointSuffix"` + SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"` + TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"` + KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"` + ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"` + ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"` + ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"` + ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"` +} + +var ( + // PublicCloud is the default public Azure cloud environment + PublicCloud = Environment{ + Name: "AzurePublicCloud", + ManagementPortalURL: "https://manage.windowsazure.com/", + PublishSettingsURL: "https://manage.windowsazure.com/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.windows.net/", + ResourceManagerEndpoint: "https://management.azure.com/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", + GalleryEndpoint: "https://gallery.azure.com/", + KeyVaultEndpoint: "https://vault.azure.net/", + GraphEndpoint: "https://graph.windows.net/", + StorageEndpointSuffix: "core.windows.net", + SQLDatabaseDNSSuffix: "database.windows.net", + TrafficManagerDNSSuffix: "trafficmanager.net", + KeyVaultDNSSuffix: "vault.azure.net", + 
ServiceBusEndpointSuffix: "servicebus.azure.com", + ServiceManagementVMDNSSuffix: "cloudapp.net", + ResourceManagerVMDNSSuffix: "cloudapp.azure.com", + ContainerRegistryDNSSuffix: "azurecr.io", + } + + // USGovernmentCloud is the cloud environment for the US Government + USGovernmentCloud = Environment{ + Name: "AzureUSGovernmentCloud", + ManagementPortalURL: "https://manage.windowsazure.us/", + PublishSettingsURL: "https://manage.windowsazure.us/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.usgovcloudapi.net/", + ResourceManagerEndpoint: "https://management.usgovcloudapi.net/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", + GalleryEndpoint: "https://gallery.usgovcloudapi.net/", + KeyVaultEndpoint: "https://vault.usgovcloudapi.net/", + GraphEndpoint: "https://graph.usgovcloudapi.net/", + StorageEndpointSuffix: "core.usgovcloudapi.net", + SQLDatabaseDNSSuffix: "database.usgovcloudapi.net", + TrafficManagerDNSSuffix: "usgovtrafficmanager.net", + KeyVaultDNSSuffix: "vault.usgovcloudapi.net", + ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net", + ServiceManagementVMDNSSuffix: "usgovcloudapp.net", + ResourceManagerVMDNSSuffix: "cloudapp.windowsazure.us", + ContainerRegistryDNSSuffix: "azurecr.io", + } + + // ChinaCloud is the cloud environment operated in China + ChinaCloud = Environment{ + Name: "AzureChinaCloud", + ManagementPortalURL: "https://manage.chinacloudapi.com/", + PublishSettingsURL: "https://manage.chinacloudapi.com/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.chinacloudapi.cn/", + ResourceManagerEndpoint: "https://management.chinacloudapi.cn/", + ActiveDirectoryEndpoint: "https://login.chinacloudapi.cn/", + GalleryEndpoint: "https://gallery.chinacloudapi.cn/", + KeyVaultEndpoint: "https://vault.azure.cn/", + GraphEndpoint: "https://graph.chinacloudapi.cn/", + StorageEndpointSuffix: "core.chinacloudapi.cn", + SQLDatabaseDNSSuffix: "database.chinacloudapi.cn", + TrafficManagerDNSSuffix: "trafficmanager.cn", + KeyVaultDNSSuffix: "vault.azure.cn", + ServiceBusEndpointSuffix: "servicebus.chinacloudapi.net", + ServiceManagementVMDNSSuffix: "chinacloudapp.cn", + ResourceManagerVMDNSSuffix: "cloudapp.azure.cn", + ContainerRegistryDNSSuffix: "azurecr.io", + } + + // GermanCloud is the cloud environment operated in Germany + GermanCloud = Environment{ + Name: "AzureGermanCloud", + ManagementPortalURL: "http://portal.microsoftazure.de/", + PublishSettingsURL: "https://manage.microsoftazure.de/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.cloudapi.de/", + ResourceManagerEndpoint: "https://management.microsoftazure.de/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.de/", + GalleryEndpoint: "https://gallery.cloudapi.de/", + KeyVaultEndpoint: "https://vault.microsoftazure.de/", + GraphEndpoint: "https://graph.cloudapi.de/", + StorageEndpointSuffix: "core.cloudapi.de", + SQLDatabaseDNSSuffix: "database.cloudapi.de", + TrafficManagerDNSSuffix: "azuretrafficmanager.de", + KeyVaultDNSSuffix: "vault.microsoftazure.de", + ServiceBusEndpointSuffix: "servicebus.cloudapi.de", + ServiceManagementVMDNSSuffix: "azurecloudapp.de", + ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de", + ContainerRegistryDNSSuffix: "azurecr.io", + } +) + +// EnvironmentFromName returns an Environment based on the common name specified +func EnvironmentFromName(name string) (Environment, error) { + name = strings.ToUpper(name) + env, ok := environments[name] + if !ok { + return env, 
fmt.Errorf("autorest/azure: There is no cloud environment matching the name %q", name) + } + return env, nil +} + +// OAuthConfigForTenant returns an OAuthConfig with tenant specific urls +func (env Environment) OAuthConfigForTenant(tenantID string) (*OAuthConfig, error) { + return OAuthConfigForTenant(env.ActiveDirectoryEndpoint, tenantID) +} + +// OAuthConfigForTenant returns an OAuthConfig with tenant specific urls for target cloud auth endpoint +func OAuthConfigForTenant(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) { + template := "%s/oauth2/%s?api-version=%s" + u, err := url.Parse(activeDirectoryEndpoint) + if err != nil { + return nil, err + } + authorizeURL, err := u.Parse(fmt.Sprintf(template, tenantID, "authorize", activeDirectoryAPIVersion)) + if err != nil { + return nil, err + } + tokenURL, err := u.Parse(fmt.Sprintf(template, tenantID, "token", activeDirectoryAPIVersion)) + if err != nil { + return nil, err + } + deviceCodeURL, err := u.Parse(fmt.Sprintf(template, tenantID, "devicecode", activeDirectoryAPIVersion)) + if err != nil { + return nil, err + } + + return &OAuthConfig{ + AuthorizeEndpoint: *authorizeURL, + TokenEndpoint: *tokenURL, + DeviceCodeEndpoint: *deviceCodeURL, + }, nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/persist.go b/vendor/github.com/Azure/go-autorest/autorest/azure/persist.go new file mode 100644 index 00000000..d5cf62dd --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/persist.go @@ -0,0 +1,59 @@ +package azure + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" +) + +// LoadToken restores a Token object from a file located at 'path'. +func LoadToken(path string) (*Token, error) { + file, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err) + } + defer file.Close() + + var token Token + + dec := json.NewDecoder(file) + if err = dec.Decode(&token); err != nil { + return nil, fmt.Errorf("failed to decode contents of file (%s) into Token representation: %v", path, err) + } + return &token, nil +} + +// SaveToken persists an oauth token at the given location on disk. +// It moves the new file into place so it can safely be used to replace an existing file +// that maybe accessed by multiple processes. +func SaveToken(path string, mode os.FileMode, token Token) error { + dir := filepath.Dir(path) + err := os.MkdirAll(dir, os.ModePerm) + if err != nil { + return fmt.Errorf("failed to create directory (%s) to store token in: %v", dir, err) + } + + newFile, err := ioutil.TempFile(dir, "token") + if err != nil { + return fmt.Errorf("failed to create the temp file to write the token: %v", err) + } + tempPath := newFile.Name() + + if err := json.NewEncoder(newFile).Encode(token); err != nil { + return fmt.Errorf("failed to encode token to file (%s) while saving token: %v", tempPath, err) + } + if err := newFile.Close(); err != nil { + return fmt.Errorf("failed to close temp file %s: %v", tempPath, err) + } + + // Atomic replace to avoid multi-writer file corruptions + if err := os.Rename(tempPath, path); err != nil { + return fmt.Errorf("failed to move temporary token to desired output location. 
src=%s dst=%s: %v", tempPath, path, err) + } + if err := os.Chmod(path, mode); err != nil { + return fmt.Errorf("failed to chmod the token file %s: %v", path, err) + } + return nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/token.go b/vendor/github.com/Azure/go-autorest/autorest/azure/token.go new file mode 100644 index 00000000..cfcd0301 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/token.go @@ -0,0 +1,363 @@ +package azure + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/x509" + "encoding/base64" + "fmt" + "net/http" + "net/url" + "strconv" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/dgrijalva/jwt-go" +) + +const ( + defaultRefresh = 5 * time.Minute + tokenBaseDate = "1970-01-01T00:00:00Z" + + // OAuthGrantTypeDeviceCode is the "grant_type" identifier used in device flow + OAuthGrantTypeDeviceCode = "device_code" + + // OAuthGrantTypeClientCredentials is the "grant_type" identifier used in credential flows + OAuthGrantTypeClientCredentials = "client_credentials" + + // OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows + OAuthGrantTypeRefreshToken = "refresh_token" +) + +var expirationBase time.Time + +func init() { + expirationBase, _ = time.Parse(time.RFC3339, tokenBaseDate) +} + +// TokenRefreshCallback is the type representing callbacks that will be called after +// a successful token refresh +type TokenRefreshCallback func(Token) error + +// Token encapsulates the access token used to authorize Azure requests. +type Token struct { + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + + ExpiresIn string `json:"expires_in"` + ExpiresOn string `json:"expires_on"` + NotBefore string `json:"not_before"` + + Resource string `json:"resource"` + Type string `json:"token_type"` +} + +// Expires returns the time.Time when the Token expires. +func (t Token) Expires() time.Time { + s, err := strconv.Atoi(t.ExpiresOn) + if err != nil { + s = -3600 + } + return expirationBase.Add(time.Duration(s) * time.Second).UTC() +} + +// IsExpired returns true if the Token is expired, false otherwise. +func (t Token) IsExpired() bool { + return t.WillExpireIn(0) +} + +// WillExpireIn returns true if the Token will expire after the passed time.Duration interval +// from now, false otherwise. +func (t Token) WillExpireIn(d time.Duration) bool { + return !t.Expires().After(time.Now().Add(d)) +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose +// value is "Bearer " followed by the AccessToken of the Token. +func (t *Token) WithAuthorization() autorest.PrepareDecorator { + return func(p autorest.Preparer) autorest.Preparer { + return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { + return (autorest.WithBearerAuthorization(t.AccessToken)(p)).Prepare(r) + }) + } +} + +// ServicePrincipalNoSecret represents a secret type that contains no secret +// meaning it is not valid for fetching a fresh token. 
This is used by Manual +type ServicePrincipalNoSecret struct { +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret +// It only returns an error for the ServicePrincipalNoSecret type +func (noSecret *ServicePrincipalNoSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + return fmt.Errorf("Manually created ServicePrincipalToken does not contain secret material to retrieve a new access token") +} + +// ServicePrincipalSecret is an interface that allows various secret mechanism to fill the form +// that is submitted when acquiring an oAuth token. +type ServicePrincipalSecret interface { + SetAuthenticationValues(spt *ServicePrincipalToken, values *url.Values) error +} + +// ServicePrincipalTokenSecret implements ServicePrincipalSecret for client_secret type authorization. +type ServicePrincipalTokenSecret struct { + ClientSecret string +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +// It will populate the form submitted during oAuth Token Acquisition using the client_secret. +func (tokenSecret *ServicePrincipalTokenSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + v.Set("client_secret", tokenSecret.ClientSecret) + return nil +} + +// ServicePrincipalCertificateSecret implements ServicePrincipalSecret for generic RSA cert auth with signed JWTs. +type ServicePrincipalCertificateSecret struct { + Certificate *x509.Certificate + PrivateKey *rsa.PrivateKey +} + +// SignJwt returns the JWT signed with the certificate's private key. +func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalToken) (string, error) { + hasher := sha1.New() + _, err := hasher.Write(secret.Certificate.Raw) + if err != nil { + return "", err + } + + thumbprint := base64.URLEncoding.EncodeToString(hasher.Sum(nil)) + + // The jti (JWT ID) claim provides a unique identifier for the JWT. + jti := make([]byte, 20) + _, err = rand.Read(jti) + if err != nil { + return "", err + } + + token := jwt.New(jwt.SigningMethodRS256) + token.Header["x5t"] = thumbprint + token.Claims = jwt.MapClaims{ + "aud": spt.oauthConfig.TokenEndpoint.String(), + "iss": spt.clientID, + "sub": spt.clientID, + "jti": base64.URLEncoding.EncodeToString(jti), + "nbf": time.Now().Unix(), + "exp": time.Now().Add(time.Hour * 24).Unix(), + } + + signedString, err := token.SignedString(secret.PrivateKey) + return signedString, err +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +// It will populate the form submitted during oAuth Token Acquisition using a JWT signed with a certificate. +func (secret *ServicePrincipalCertificateSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + jwt, err := secret.SignJwt(spt) + if err != nil { + return err + } + + v.Set("client_assertion", jwt) + v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer") + return nil +} + +// ServicePrincipalToken encapsulates a Token created for a Service Principal. +type ServicePrincipalToken struct { + Token + + secret ServicePrincipalSecret + oauthConfig OAuthConfig + clientID string + resource string + autoRefresh bool + refreshWithin time.Duration + sender autorest.Sender + + refreshCallbacks []TokenRefreshCallback +} + +// NewServicePrincipalTokenWithSecret create a ServicePrincipalToken using the supplied ServicePrincipalSecret implementation. 
+func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, resource string, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + spt := &ServicePrincipalToken{ + oauthConfig: oauthConfig, + secret: secret, + clientID: id, + resource: resource, + autoRefresh: true, + refreshWithin: defaultRefresh, + sender: &http.Client{}, + refreshCallbacks: callbacks, + } + return spt, nil +} + +// NewServicePrincipalTokenFromManualToken creates a ServicePrincipalToken using the supplied token +func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID string, resource string, token Token, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + spt, err := NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalNoSecret{}, + callbacks...) + if err != nil { + return nil, err + } + + spt.Token = token + + return spt, nil +} + +// NewServicePrincipalToken creates a ServicePrincipalToken from the supplied Service Principal +// credentials scoped to the named resource. +func NewServicePrincipalToken(oauthConfig OAuthConfig, clientID string, secret string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + return NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalTokenSecret{ + ClientSecret: secret, + }, + callbacks..., + ) +} + +// NewServicePrincipalTokenFromCertificate create a ServicePrincipalToken from the supplied pkcs12 bytes. +func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + return NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalCertificateSecret{ + PrivateKey: privateKey, + Certificate: certificate, + }, + callbacks..., + ) +} + +// EnsureFresh will refresh the token if it will expire within the refresh window (as set by +// RefreshWithin). +func (spt *ServicePrincipalToken) EnsureFresh() error { + if spt.WillExpireIn(spt.refreshWithin) { + return spt.Refresh() + } + return nil +} + +// InvokeRefreshCallbacks calls any TokenRefreshCallbacks that were added to the SPT during initialization +func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error { + if spt.refreshCallbacks != nil { + for _, callback := range spt.refreshCallbacks { + err := callback(spt.Token) + if err != nil { + return autorest.NewErrorWithError(err, + "azure.ServicePrincipalToken", "InvokeRefreshCallbacks", nil, "A TokenRefreshCallback handler returned an error") + } + } + } + return nil +} + +// Refresh obtains a fresh token for the Service Principal. +func (spt *ServicePrincipalToken) Refresh() error { + return spt.refreshInternal(spt.resource) +} + +// RefreshExchange refreshes the token, but for a different resource. 
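As an aside, the constructors above all funnel through NewServicePrincipalTokenWithSecret and compose with EnsureFresh into a simple acquire-and-refresh flow. The sketch below is illustrative only: the client ID, secret, and resource are placeholders, and oauthConfig is assumed to be an azure.OAuthConfig populated by the caller.

```go
package example

import (
	"log"

	"github.com/Azure/go-autorest/autorest/azure"
)

// AcquireToken is an illustrative helper; oauthConfig is assumed to be
// populated (e.g., with the tenant's token endpoint) by the caller.
func AcquireToken(oauthConfig azure.OAuthConfig) *azure.ServicePrincipalToken {
	spt, err := azure.NewServicePrincipalToken(
		oauthConfig,
		"my-client-id",     // placeholder service principal (application) ID
		"my-client-secret", // placeholder client secret
		"https://management.core.windows.net/", // placeholder resource to authorize against
	)
	if err != nil {
		log.Fatal(err)
	}
	// Refreshes only when the token is within refreshWithin
	// (5 minutes by default) of expiring.
	if err := spt.EnsureFresh(); err != nil {
		log.Fatal(err)
	}
	return spt
}
```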
+func (spt *ServicePrincipalToken) RefreshExchange(resource string) error { + return spt.refreshInternal(resource) +} + +func (spt *ServicePrincipalToken) refreshInternal(resource string) error { + v := url.Values{} + v.Set("client_id", spt.clientID) + v.Set("resource", resource) + + if spt.RefreshToken != "" { + v.Set("grant_type", OAuthGrantTypeRefreshToken) + v.Set("refresh_token", spt.RefreshToken) + } else { + v.Set("grant_type", OAuthGrantTypeClientCredentials) + err := spt.secret.SetAuthenticationValues(spt, &v) + if err != nil { + return err + } + } + + req, _ := autorest.Prepare(&http.Request{}, + autorest.AsPost(), + autorest.AsFormURLEncoded(), + autorest.WithBaseURL(spt.oauthConfig.TokenEndpoint.String()), + autorest.WithFormData(v)) + + resp, err := autorest.SendWithSender(spt.sender, req) + if err != nil { + return autorest.NewErrorWithError(err, + "azure.ServicePrincipalToken", "Refresh", resp, "Failure sending request for Service Principal %s", + spt.clientID) + } + + var newToken Token + err = autorest.Respond(resp, + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&newToken), + autorest.ByClosing()) + if err != nil { + return autorest.NewErrorWithError(err, + "azure.ServicePrincipalToken", "Refresh", resp, "Failure handling response to Service Principal %s request", + spt.clientID) + } + + spt.Token = newToken + + err = spt.InvokeRefreshCallbacks(newToken) + if err != nil { + // its already wrapped inside InvokeRefreshCallbacks + return err + } + + return nil +} + +// SetAutoRefresh enables or disables automatic refreshing of stale tokens. +func (spt *ServicePrincipalToken) SetAutoRefresh(autoRefresh bool) { + spt.autoRefresh = autoRefresh +} + +// SetRefreshWithin sets the interval within which if the token will expire, EnsureFresh will +// refresh the token. +func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) { + spt.refreshWithin = d + return +} + +// SetSender sets the autorest.Sender used when obtaining the Service Principal token. An +// undecorated http.Client is used by default. +func (spt *ServicePrincipalToken) SetSender(s autorest.Sender) { + spt.sender = s +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose +// value is "Bearer " followed by the AccessToken of the ServicePrincipalToken. +// +// By default, the token will automatically refresh if nearly expired (as determined by the +// RefreshWithin interval). Use the AutoRefresh method to enable or disable automatically refreshing +// tokens. 
+func (spt *ServicePrincipalToken) WithAuthorization() autorest.PrepareDecorator { + return func(p autorest.Preparer) autorest.Preparer { + return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { + if spt.autoRefresh { + err := spt.EnsureFresh() + if err != nil { + return r, autorest.NewErrorWithError(err, + "azure.ServicePrincipalToken", "WithAuthorization", nil, "Failed to refresh Service Principal Token for request to %s", + r.URL) + } + } + return (autorest.WithBearerAuthorization(spt.AccessToken)(p)).Prepare(r) + }) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/client.go b/vendor/github.com/Azure/go-autorest/autorest/client.go new file mode 100644 index 00000000..b5f94b5c --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/client.go @@ -0,0 +1,235 @@ +package autorest + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "net/http/cookiejar" + "runtime" + "time" +) + +const ( + // DefaultPollingDelay is a reasonable delay between polling requests. + DefaultPollingDelay = 60 * time.Second + + // DefaultPollingDuration is a reasonable total polling duration. + DefaultPollingDuration = 15 * time.Minute + + // DefaultRetryAttempts is number of attempts for retry status codes (5xx). + DefaultRetryAttempts = 3 +) + +var ( + // defaultUserAgent builds a string containing the Go version, system archityecture and OS, + // and the go-autorest version. + defaultUserAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s", + runtime.Version(), + runtime.GOARCH, + runtime.GOOS, + Version(), + ) + + statusCodesForRetry = []int{ + http.StatusRequestTimeout, // 408 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + } +) + +const ( + requestFormat = `HTTP Request Begin =================================================== +%s +===================================================== HTTP Request End +` + responseFormat = `HTTP Response Begin =================================================== +%s +===================================================== HTTP Response End +` +) + +// Response serves as the base for all responses from generated clients. It provides access to the +// last http.Response. +type Response struct { + *http.Response `json:"-"` +} + +// LoggingInspector implements request and response inspectors that log the full request and +// response to a supplied log. +type LoggingInspector struct { + Logger *log.Logger +} + +// WithInspection returns a PrepareDecorator that emits the http.Request to the supplied logger. The +// body is restored after being emitted. +// +// Note: Since it reads the entire Body, this decorator should not be used where body streaming is +// important. It is best used to trace JSON or similar body values. +func (li LoggingInspector) WithInspection() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + var body, b bytes.Buffer + + defer r.Body.Close() + + r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &body)) + if err := r.Write(&b); err != nil { + return nil, fmt.Errorf("Failed to write response: %v", err) + } + + li.Logger.Printf(requestFormat, b.String()) + + r.Body = ioutil.NopCloser(&body) + return p.Prepare(r) + }) + } +} + +// ByInspecting returns a RespondDecorator that emits the http.Response to the supplied logger. The +// body is restored after being emitted. 
+// +// Note: Since it reads the entire Body, this decorator should not be used where body streaming is +// important. It is best used to trace JSON or similar body values. +func (li LoggingInspector) ByInspecting() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + var body, b bytes.Buffer + defer resp.Body.Close() + resp.Body = ioutil.NopCloser(io.TeeReader(resp.Body, &body)) + if err := resp.Write(&b); err != nil { + return fmt.Errorf("Failed to write response: %v", err) + } + + li.Logger.Printf(responseFormat, b.String()) + + resp.Body = ioutil.NopCloser(&body) + return r.Respond(resp) + }) + } +} + +// Client is the base for autorest generated clients. It provides default, "do nothing" +// implementations of an Authorizer, RequestInspector, and ResponseInspector. It also returns the +// standard, undecorated http.Client as a default Sender. +// +// Generated clients should also use Error (see NewError and NewErrorWithError) for errors and +// return responses that compose with Response. +// +// Most customization of generated clients is best achieved by supplying a custom Authorizer, custom +// RequestInspector, and / or custom ResponseInspector. Users may log requests, implement circuit +// breakers (see https://msdn.microsoft.com/en-us/library/dn589784.aspx) or otherwise influence +// sending the request by providing a decorated Sender. +type Client struct { + Authorizer Authorizer + Sender Sender + RequestInspector PrepareDecorator + ResponseInspector RespondDecorator + + // PollingDelay sets the polling frequency used in absence of a Retry-After HTTP header + PollingDelay time.Duration + + // PollingDuration sets the maximum polling time after which an error is returned. + PollingDuration time.Duration + + // RetryAttempts sets the default number of retry attempts for client. + RetryAttempts int + + // RetryDuration sets the delay duration for retries. + RetryDuration time.Duration + + // UserAgent, if not empty, will be set as the HTTP User-Agent header on all requests sent + // through the Do method. + UserAgent string + + Jar http.CookieJar +} + +// NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed +// string. +func NewClientWithUserAgent(ua string) Client { + c := Client{ + PollingDelay: DefaultPollingDelay, + PollingDuration: DefaultPollingDuration, + RetryAttempts: DefaultRetryAttempts, + RetryDuration: 30 * time.Second, + UserAgent: defaultUserAgent, + } + c.AddToUserAgent(ua) + return c +} + +// AddToUserAgent adds an extension to the current user agent +func (c *Client) AddToUserAgent(extension string) error { + if extension != "" { + c.UserAgent = fmt.Sprintf("%s %s", c.UserAgent, extension) + return nil + } + return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.UserAgent) +} + +// Do implements the Sender interface by invoking the active Sender after applying authorization. +// If Sender is not set, it uses a new instance of http.Client. In both cases it will, if UserAgent +// is set, apply set the User-Agent header. 
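For illustration, NewClientWithUserAgent plus a LoggingInspector wires both inspector hooks together; the user agent string and log destination below are arbitrary. Since the inspectors buffer entire bodies, this setup suits small JSON payloads rather than streaming responses.

```go
package example

import (
	"log"
	"os"

	"github.com/Azure/go-autorest/autorest"
)

// NewInspectedClient is an illustrative constructor, not a prescribed pattern.
func NewInspectedClient() autorest.Client {
	c := autorest.NewClientWithUserAgent("example-app/0.1")

	li := autorest.LoggingInspector{Logger: log.New(os.Stderr, "", log.LstdFlags)}
	c.RequestInspector = li.WithInspection() // dumps each outgoing request
	c.ResponseInspector = li.ByInspecting()  // dumps each incoming response
	return c
}
```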
+func (c Client) Do(r *http.Request) (*http.Response, error) { + if r.UserAgent() == "" { + r, _ = Prepare(r, + WithUserAgent(c.UserAgent)) + } + r, err := Prepare(r, + c.WithInspection(), + c.WithAuthorization()) + if err != nil { + return nil, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed") + } + resp, err := SendWithSender(c.sender(), r, + DoRetryForStatusCodes(c.RetryAttempts, c.RetryDuration, statusCodesForRetry...)) + Respond(resp, + c.ByInspecting()) + return resp, err +} + +// sender returns the Sender to which to send requests. +func (c Client) sender() Sender { + if c.Sender == nil { + j, _ := cookiejar.New(nil) + return &http.Client{Jar: j} + } + return c.Sender +} + +// WithAuthorization is a convenience method that returns the WithAuthorization PrepareDecorator +// from the current Authorizer. If not Authorizer is set, it uses the NullAuthorizer. +func (c Client) WithAuthorization() PrepareDecorator { + return c.authorizer().WithAuthorization() +} + +// authorizer returns the Authorizer to use. +func (c Client) authorizer() Authorizer { + if c.Authorizer == nil { + return NullAuthorizer{} + } + return c.Authorizer +} + +// WithInspection is a convenience method that passes the request to the supplied RequestInspector, +// if present, or returns the WithNothing PrepareDecorator otherwise. +func (c Client) WithInspection() PrepareDecorator { + if c.RequestInspector == nil { + return WithNothing() + } + return c.RequestInspector +} + +// ByInspecting is a convenience method that passes the response to the supplied ResponseInspector, +// if present, or returns the ByIgnoring RespondDecorator otherwise. +func (c Client) ByInspecting() RespondDecorator { + if c.ResponseInspector == nil { + return ByIgnoring() + } + return c.ResponseInspector +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/date.go b/vendor/github.com/Azure/go-autorest/autorest/date/date.go new file mode 100644 index 00000000..80ca60e9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/date.go @@ -0,0 +1,82 @@ +/* +Package date provides time.Time derivatives that conform to the Swagger.io (https://swagger.io/) +defined date formats: Date and DateTime. Both types may, in most cases, be used in lieu of +time.Time types. And both convert to time.Time through a ToTime method. +*/ +package date + +import ( + "fmt" + "time" +) + +const ( + fullDate = "2006-01-02" + fullDateJSON = `"2006-01-02"` + dateFormat = "%04d-%02d-%02d" + jsonFormat = `"%04d-%02d-%02d"` +) + +// Date defines a type similar to time.Time but assumes a layout of RFC3339 full-date (i.e., +// 2006-01-02). +type Date struct { + time.Time +} + +// ParseDate create a new Date from the passed string. +func ParseDate(date string) (d Date, err error) { + return parseDate(date, fullDate) +} + +func parseDate(date string, format string) (Date, error) { + d, err := time.Parse(format, date) + return Date{Time: d}, err +} + +// MarshalBinary preserves the Date as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d Date) MarshalBinary() ([]byte, error) { + return d.MarshalText() +} + +// UnmarshalBinary reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d *Date) UnmarshalBinary(data []byte) error { + return d.UnmarshalText(data) +} + +// MarshalJSON preserves the Date as a JSON string conforming to RFC3339 full-date (i.e., +// 2006-01-02). 
+func (d Date) MarshalJSON() (json []byte, err error) { + return []byte(fmt.Sprintf(jsonFormat, d.Year(), d.Month(), d.Day())), nil +} + +// UnmarshalJSON reconstitutes the Date from a JSON string conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d *Date) UnmarshalJSON(data []byte) (err error) { + d.Time, err = time.Parse(fullDateJSON, string(data)) + return err +} + +// MarshalText preserves the Date as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d Date) MarshalText() (text []byte, err error) { + return []byte(fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())), nil +} + +// UnmarshalText reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d *Date) UnmarshalText(data []byte) (err error) { + d.Time, err = time.Parse(fullDate, string(data)) + return err +} + +// String returns the Date formatted as an RFC3339 full-date string (i.e., 2006-01-02). +func (d Date) String() string { + return fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day()) +} + +// ToTime returns a Date as a time.Time +func (d Date) ToTime() time.Time { + return d.Time +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/time.go b/vendor/github.com/Azure/go-autorest/autorest/date/time.go new file mode 100644 index 00000000..c1af6296 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/time.go @@ -0,0 +1,89 @@ +package date + +import ( + "regexp" + "time" +) + +// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. +const ( + azureUtcFormatJSON = `"2006-01-02T15:04:05.999999999"` + azureUtcFormat = "2006-01-02T15:04:05.999999999" + rfc3339JSON = `"` + time.RFC3339Nano + `"` + rfc3339 = time.RFC3339Nano + tzOffsetRegex = `(Z|z|\+|-)(\d+:\d+)*"*$` +) + +// Time defines a type similar to time.Time but assumes a layout of RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +type Time struct { + time.Time +} + +// MarshalBinary preserves the Time as a byte array conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalBinary() ([]byte, error) { + return t.Time.MarshalText() +} + +// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalBinary(data []byte) error { + return t.UnmarshalText(data) +} + +// MarshalJSON preserves the Time as a JSON string conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalJSON() (json []byte, err error) { + return t.Time.MarshalJSON() +} + +// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalJSON(data []byte) (err error) { + timeFormat := azureUtcFormatJSON + match, err := regexp.Match(tzOffsetRegex, data) + if err != nil { + return err + } else if match { + timeFormat = rfc3339JSON + } + t.Time, err = ParseTime(timeFormat, string(data)) + return err +} + +// MarshalText preserves the Time as a byte array conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalText() (text []byte, err error) { + return t.Time.MarshalText() +} + +// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). 
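A small illustrative round trip of the date types, including the Azure quirk of timestamps arriving without a zone suffix; the values are arbitrary.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/go-autorest/autorest/date"
)

func main() {
	var d date.Date
	_ = json.Unmarshal([]byte(`"2017-01-18"`), &d)

	var t date.Time
	// Accepted even without a trailing "Z": the tzOffsetRegex check fails
	// to match, so parsing falls back to the Azure UTC layout.
	_ = json.Unmarshal([]byte(`"2017-01-18T11:07:35"`), &t)

	b, _ := json.Marshal(d)
	fmt.Println(string(b), t.String()) // "2017-01-18" 2017-01-18T11:07:35Z
}
```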
+func (t *Time) UnmarshalText(data []byte) (err error) { + timeFormat := azureUtcFormat + match, err := regexp.Match(tzOffsetRegex, data) + if err != nil { + return err + } else if match { + timeFormat = rfc3339 + } + t.Time, err = ParseTime(timeFormat, string(data)) + return err +} + +// String returns the Time formatted as an RFC3339 date-time string (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) String() string { + // Note: time.Time.String does not return an RFC3339 compliant string, time.Time.MarshalText does. + b, err := t.MarshalText() + if err != nil { + return "" + } + return string(b) +} + +// ToTime returns a Time as a time.Time +func (t Time) ToTime() time.Time { + return t.Time +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go new file mode 100644 index 00000000..11995fb9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go @@ -0,0 +1,86 @@ +package date + +import ( + "errors" + "time" +) + +const ( + rfc1123JSON = `"` + time.RFC1123 + `"` + rfc1123 = time.RFC1123 +) + +// TimeRFC1123 defines a type similar to time.Time but assumes a layout of RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +type TimeRFC1123 struct { + time.Time +} + +// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalJSON(data []byte) (err error) { + t.Time, err = ParseTime(rfc1123JSON, string(data)) + if err != nil { + return err + } + return nil +} + +// MarshalJSON preserves the Time as a JSON string conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalJSON() ([]byte, error) { + if y := t.Year(); y < 0 || y >= 10000 { + return nil, errors.New("Time.MarshalJSON: year outside of range [0,9999]") + } + b := []byte(t.Format(rfc1123JSON)) + return b, nil +} + +// MarshalText preserves the Time as a byte array conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalText() ([]byte, error) { + if y := t.Year(); y < 0 || y >= 10000 { + return nil, errors.New("Time.MarshalText: year outside of range [0,9999]") + } + + b := []byte(t.Format(rfc1123)) + return b, nil +} + +// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalText(data []byte) (err error) { + t.Time, err = ParseTime(rfc1123, string(data)) + if err != nil { + return err + } + return nil +} + +// MarshalBinary preserves the Time as a byte array conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalBinary() ([]byte, error) { + return t.MarshalText() +} + +// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalBinary(data []byte) error { + return t.UnmarshalText(data) +} + +// ToTime returns a Time as a time.Time +func (t TimeRFC1123) ToTime() time.Time { + return t.Time +} + +// String returns the Time formatted as an RFC1123 date-time string (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) String() string { + // Note: time.Time.String does not return an RFC1123 compliant string, time.Time.MarshalText does. 
+ b, err := t.MarshalText() + if err != nil { + return "" + } + return string(b) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/utility.go b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go new file mode 100644 index 00000000..207b1a24 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go @@ -0,0 +1,11 @@ +package date + +import ( + "strings" + "time" +) + +// ParseTime to parse Time string to specified format. +func ParseTime(format string, t string) (d time.Time, err error) { + return time.Parse(format, strings.ToUpper(t)) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/error.go b/vendor/github.com/Azure/go-autorest/autorest/error.go new file mode 100644 index 00000000..4bcb8f27 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/error.go @@ -0,0 +1,80 @@ +package autorest + +import ( + "fmt" + "net/http" +) + +const ( + // UndefinedStatusCode is used when HTTP status code is not available for an error. + UndefinedStatusCode = 0 +) + +// DetailedError encloses a error with details of the package, method, and associated HTTP +// status code (if any). +type DetailedError struct { + Original error + + // PackageType is the package type of the object emitting the error. For types, the value + // matches that produced the the '%T' format specifier of the fmt package. For other elements, + // such as functions, it is just the package name (e.g., "autorest"). + PackageType string + + // Method is the name of the method raising the error. + Method string + + // StatusCode is the HTTP Response StatusCode (if non-zero) that led to the error. + StatusCode interface{} + + // Message is the error message. + Message string + + // Service Error is the response body of failed API in bytes + ServiceError []byte +} + +// NewError creates a new Error conforming object from the passed packageType, method, and +// message. message is treated as a format string to which the optional args apply. +func NewError(packageType string, method string, message string, args ...interface{}) DetailedError { + return NewErrorWithError(nil, packageType, method, nil, message, args...) +} + +// NewErrorWithResponse creates a new Error conforming object from the passed +// packageType, method, statusCode of the given resp (UndefinedStatusCode if +// resp is nil), and message. message is treated as a format string to which the +// optional args apply. +func NewErrorWithResponse(packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError { + return NewErrorWithError(nil, packageType, method, resp, message, args...) +} + +// NewErrorWithError creates a new Error conforming object from the +// passed packageType, method, statusCode of the given resp (UndefinedStatusCode +// if resp is nil), message, and original error. message is treated as a format +// string to which the optional args apply. +func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError { + if v, ok := original.(DetailedError); ok { + return v + } + + statusCode := UndefinedStatusCode + if resp != nil { + statusCode = resp.StatusCode + } + + return DetailedError{ + Original: original, + PackageType: packageType, + Method: method, + StatusCode: statusCode, + Message: fmt.Sprintf(message, args...), + } +} + +// Error returns a formatted containing all available details (i.e., PackageType, Method, +// StatusCode, Message, and original error (if any)). 
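For illustration, wrapping a low-level failure yields a single DetailedError carrying the package type, method, and status; the package and method names below are made up.

```go
package main

import (
	"fmt"
	"io"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	// With a nil response, StatusCode reports UndefinedStatusCode (0).
	err := autorest.NewErrorWithError(io.ErrUnexpectedEOF,
		"examplepkg", "Fetch", nil, "reading body failed")
	fmt.Println(err.Error())
	// examplepkg#Fetch: reading body failed: StatusCode=0 -- Original Error: unexpected EOF
}
```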
+func (e DetailedError) Error() string { + if e.Original == nil { + return fmt.Sprintf("%s#%s: %s: StatusCode=%d", e.PackageType, e.Method, e.Message, e.StatusCode) + } + return fmt.Sprintf("%s#%s: %s: StatusCode=%d -- Original Error: %v", e.PackageType, e.Method, e.Message, e.StatusCode, e.Original) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/preparer.go b/vendor/github.com/Azure/go-autorest/autorest/preparer.go new file mode 100644 index 00000000..c9deb261 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/preparer.go @@ -0,0 +1,443 @@ +package autorest + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "mime/multipart" + "net/http" + "net/url" + "strings" +) + +const ( + mimeTypeJSON = "application/json" + mimeTypeFormPost = "application/x-www-form-urlencoded" + + headerAuthorization = "Authorization" + headerContentType = "Content-Type" + headerUserAgent = "User-Agent" +) + +// Preparer is the interface that wraps the Prepare method. +// +// Prepare accepts and possibly modifies an http.Request (e.g., adding Headers). Implementations +// must ensure to not share or hold per-invocation state since Preparers may be shared and re-used. +type Preparer interface { + Prepare(*http.Request) (*http.Request, error) +} + +// PreparerFunc is a method that implements the Preparer interface. +type PreparerFunc func(*http.Request) (*http.Request, error) + +// Prepare implements the Preparer interface on PreparerFunc. +func (pf PreparerFunc) Prepare(r *http.Request) (*http.Request, error) { + return pf(r) +} + +// PrepareDecorator takes and possibly decorates, by wrapping, a Preparer. Decorators may affect the +// http.Request and pass it along or, first, pass the http.Request along then affect the result. +type PrepareDecorator func(Preparer) Preparer + +// CreatePreparer creates, decorates, and returns a Preparer. +// Without decorators, the returned Preparer returns the passed http.Request unmodified. +// Preparers are safe to share and re-use. +func CreatePreparer(decorators ...PrepareDecorator) Preparer { + return DecoratePreparer( + Preparer(PreparerFunc(func(r *http.Request) (*http.Request, error) { return r, nil })), + decorators...) +} + +// DecoratePreparer accepts a Preparer and a, possibly empty, set of PrepareDecorators, which it +// applies to the Preparer. Decorators are applied in the order received, but their affect upon the +// request depends on whether they are a pre-decorator (change the http.Request and then pass it +// along) or a post-decorator (pass the http.Request along and alter it on return). +func DecoratePreparer(p Preparer, decorators ...PrepareDecorator) Preparer { + for _, decorate := range decorators { + p = decorate(p) + } + return p +} + +// Prepare accepts an http.Request and a, possibly empty, set of PrepareDecorators. +// It creates a Preparer from the decorators which it then applies to the passed http.Request. +func Prepare(r *http.Request, decorators ...PrepareDecorator) (*http.Request, error) { + if r == nil { + return nil, NewError("autorest", "Prepare", "Invoked without an http.Request") + } + return CreatePreparer(decorators...).Prepare(r) +} + +// WithNothing returns a "do nothing" PrepareDecorator that makes no changes to the passed +// http.Request. 
+func WithNothing() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + return p.Prepare(r) + }) + } +} + +// WithHeader returns a PrepareDecorator that sets the specified HTTP header of the http.Request to +// the passed value. It canonicalizes the passed header name (via http.CanonicalHeaderKey) before +// adding the header. +func WithHeader(header string, value string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set(http.CanonicalHeaderKey(header), value) + } + return r, err + }) + } +} + +// WithBearerAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose +// value is "Bearer " followed by the supplied token. +func WithBearerAuthorization(token string) PrepareDecorator { + return WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", token)) +} + +// AsContentType returns a PrepareDecorator that adds an HTTP Content-Type header whose value +// is the passed contentType. +func AsContentType(contentType string) PrepareDecorator { + return WithHeader(headerContentType, contentType) +} + +// WithUserAgent returns a PrepareDecorator that adds an HTTP User-Agent header whose value is the +// passed string. +func WithUserAgent(ua string) PrepareDecorator { + return WithHeader(headerUserAgent, ua) +} + +// AsFormURLEncoded returns a PrepareDecorator that adds an HTTP Content-Type header whose value is +// "application/x-www-form-urlencoded". +func AsFormURLEncoded() PrepareDecorator { + return AsContentType(mimeTypeFormPost) +} + +// AsJSON returns a PrepareDecorator that adds an HTTP Content-Type header whose value is +// "application/json". +func AsJSON() PrepareDecorator { + return AsContentType(mimeTypeJSON) +} + +// WithMethod returns a PrepareDecorator that sets the HTTP method of the passed request. The +// decorator does not validate that the passed method string is a known HTTP method. +func WithMethod(method string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r.Method = method + return p.Prepare(r) + }) + } +} + +// AsDelete returns a PrepareDecorator that sets the HTTP method to DELETE. +func AsDelete() PrepareDecorator { return WithMethod("DELETE") } + +// AsGet returns a PrepareDecorator that sets the HTTP method to GET. +func AsGet() PrepareDecorator { return WithMethod("GET") } + +// AsHead returns a PrepareDecorator that sets the HTTP method to HEAD. +func AsHead() PrepareDecorator { return WithMethod("HEAD") } + +// AsOptions returns a PrepareDecorator that sets the HTTP method to OPTIONS. +func AsOptions() PrepareDecorator { return WithMethod("OPTIONS") } + +// AsPatch returns a PrepareDecorator that sets the HTTP method to PATCH. +func AsPatch() PrepareDecorator { return WithMethod("PATCH") } + +// AsPost returns a PrepareDecorator that sets the HTTP method to POST. +func AsPost() PrepareDecorator { return WithMethod("POST") } + +// AsPut returns a PrepareDecorator that sets the HTTP method to PUT. +func AsPut() PrepareDecorator { return WithMethod("PUT") } + +// WithBaseURL returns a PrepareDecorator that populates the http.Request with a url.URL constructed +// from the supplied baseUrl. 
+func WithBaseURL(baseURL string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + var u *url.URL + if u, err = url.Parse(baseURL); err != nil { + return r, err + } + if u.Scheme == "" { + err = fmt.Errorf("autorest: No scheme detected in URL %s", baseURL) + } + if err == nil { + r.URL = u + } + } + return r, err + }) + } +} + +// WithCustomBaseURL returns a PrepareDecorator that replaces brace-enclosed keys within the +// request base URL (i.e., http.Request.URL) with the corresponding values from the passed map. +func WithCustomBaseURL(baseURL string, urlParameters map[string]interface{}) PrepareDecorator { + parameters := ensureValueStrings(urlParameters) + for key, value := range parameters { + baseURL = strings.Replace(baseURL, "{"+key+"}", value, -1) + } + return WithBaseURL(baseURL) +} + +// WithFormData returns a PrepareDecoratore that "URL encodes" (e.g., bar=baz&foo=quux) into the +// http.Request body. +func WithFormData(v url.Values) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + s := v.Encode() + r.ContentLength = int64(len(s)) + r.Body = ioutil.NopCloser(strings.NewReader(s)) + } + return r, err + }) + } +} + +// WithMultiPartFormData returns a PrepareDecoratore that "URL encodes" (e.g., bar=baz&foo=quux) form parameters +// into the http.Request body. +func WithMultiPartFormData(formDataParameters map[string]interface{}) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + var body bytes.Buffer + writer := multipart.NewWriter(&body) + for key, value := range formDataParameters { + if rc, ok := value.(io.ReadCloser); ok { + var fd io.Writer + if fd, err = writer.CreateFormFile(key, key); err != nil { + return r, err + } + if _, err = io.Copy(fd, rc); err != nil { + return r, err + } + } else { + if err = writer.WriteField(key, ensureValueString(value)); err != nil { + return r, err + } + } + } + if err = writer.Close(); err != nil { + return r, err + } + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set(http.CanonicalHeaderKey(headerContentType), writer.FormDataContentType()) + r.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes())) + r.ContentLength = int64(body.Len()) + return r, err + } + return r, err + }) + } +} + +// WithFile returns a PrepareDecorator that sends file in request body. +func WithFile(f io.ReadCloser) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + b, err := ioutil.ReadAll(f) + if err != nil { + return r, err + } + r.Body = ioutil.NopCloser(bytes.NewReader(b)) + r.ContentLength = int64(len(b)) + } + return r, err + }) + } +} + +// WithBool returns a PrepareDecorator that encodes the passed bool into the body of the request +// and sets the Content-Length header. +func WithBool(v bool) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithFloat32 returns a PrepareDecorator that encodes the passed float32 into the body of the +// request and sets the Content-Length header. 
+func WithFloat32(v float32) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithFloat64 returns a PrepareDecorator that encodes the passed float64 into the body of the +// request and sets the Content-Length header. +func WithFloat64(v float64) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithInt32 returns a PrepareDecorator that encodes the passed int32 into the body of the request +// and sets the Content-Length header. +func WithInt32(v int32) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithInt64 returns a PrepareDecorator that encodes the passed int64 into the body of the request +// and sets the Content-Length header. +func WithInt64(v int64) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithString returns a PrepareDecorator that encodes the passed string into the body of the request +// and sets the Content-Length header. +func WithString(v string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + r.ContentLength = int64(len(v)) + r.Body = ioutil.NopCloser(strings.NewReader(v)) + } + return r, err + }) + } +} + +// WithJSON returns a PrepareDecorator that encodes the data passed as JSON into the body of the +// request and sets the Content-Length header. +func WithJSON(v interface{}) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + b, err := json.Marshal(v) + if err == nil { + r.ContentLength = int64(len(b)) + r.Body = ioutil.NopCloser(bytes.NewReader(b)) + } + } + return r, err + }) + } +} + +// WithPath returns a PrepareDecorator that adds the supplied path to the request URL. If the path +// is absolute (that is, it begins with a "/"), it replaces the existing path. +func WithPath(path string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithPath", "Invoked with a nil URL") + } + if r.URL, err = parseURL(r.URL, path); err != nil { + return r, err + } + } + return r, err + }) + } +} + +// WithEscapedPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the +// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. The +// values will be escaped (aka URL encoded) before insertion into the path. +func WithEscapedPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator { + parameters := escapeValueStrings(ensureValueStrings(pathParameters)) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithEscapedPathParameters", "Invoked with a nil URL") + } + for key, value := range parameters { + path = strings.Replace(path, "{"+key+"}", value, -1) + } + if r.URL, err = parseURL(r.URL, path); err != nil { + return r, err + } + } + return r, err + }) + } +} + +// WithPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the +// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. 
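Taken together, the decorators above let a request be assembled declaratively, with each decorator applied in the order given. A hedged sketch of preparing a hypothetical JSON POST; the URL, path, and body are placeholders:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	req, err := autorest.Prepare(&http.Request{},
		autorest.AsPost(),
		autorest.AsJSON(),
		autorest.WithBaseURL("https://example.com"),
		autorest.WithPath("/widgets"),
		autorest.WithJSON(map[string]string{"name": "demo"}))
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(req.Method, req.URL) // POST https://example.com/widgets
}
```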
+func WithPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator { + parameters := ensureValueStrings(pathParameters) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithPathParameters", "Invoked with a nil URL") + } + for key, value := range parameters { + path = strings.Replace(path, "{"+key+"}", value, -1) + } + + if r.URL, err = parseURL(r.URL, path); err != nil { + return r, err + } + } + return r, err + }) + } +} + +func parseURL(u *url.URL, path string) (*url.URL, error) { + p := strings.TrimRight(u.String(), "/") + if !strings.HasPrefix(path, "/") { + path = "/" + path + } + return url.Parse(p + path) +} + +// WithQueryParameters returns a PrepareDecorators that encodes and applies the query parameters +// given in the supplied map (i.e., key=value). +func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorator { + parameters := ensureValueStrings(queryParameters) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithQueryParameters", "Invoked with a nil URL") + } + v := r.URL.Query() + for key, value := range parameters { + v.Add(key, value) + } + r.URL.RawQuery = createQuery(v) + } + return r, err + }) + } +} + +// Authorizer is the interface that provides a PrepareDecorator used to supply request +// authorization. Most often, the Authorizer decorator runs last so it has access to the full +// state of the formed HTTP request. +type Authorizer interface { + WithAuthorization() PrepareDecorator +} + +// NullAuthorizer implements a default, "do nothing" Authorizer. +type NullAuthorizer struct{} + +// WithAuthorization returns a PrepareDecorator that does nothing. +func (na NullAuthorizer) WithAuthorization() PrepareDecorator { + return WithNothing() +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/responder.go b/vendor/github.com/Azure/go-autorest/autorest/responder.go new file mode 100644 index 00000000..87f71e58 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/responder.go @@ -0,0 +1,236 @@ +package autorest + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net/http" + "strings" +) + +// Responder is the interface that wraps the Respond method. +// +// Respond accepts and reacts to an http.Response. Implementations must ensure to not share or hold +// state since Responders may be shared and re-used. +type Responder interface { + Respond(*http.Response) error +} + +// ResponderFunc is a method that implements the Responder interface. +type ResponderFunc func(*http.Response) error + +// Respond implements the Responder interface on ResponderFunc. +func (rf ResponderFunc) Respond(r *http.Response) error { + return rf(r) +} + +// RespondDecorator takes and possibly decorates, by wrapping, a Responder. Decorators may react to +// the http.Response and pass it along or, first, pass the http.Response along then react. +type RespondDecorator func(Responder) Responder + +// CreateResponder creates, decorates, and returns a Responder. Without decorators, the returned +// Responder returns the passed http.Response unmodified. Responders may or may not be safe to share +// and re-used: It depends on the applied decorators. 
For example, a standard decorator that closes +// the response body is fine to share whereas a decorator that reads the body into a passed struct +// is not. +// +// To prevent memory leaks, ensure that at least one Responder closes the response body. +func CreateResponder(decorators ...RespondDecorator) Responder { + return DecorateResponder( + Responder(ResponderFunc(func(r *http.Response) error { return nil })), + decorators...) +} + +// DecorateResponder accepts a Responder and a, possibly empty, set of RespondDecorators, which it +// applies to the Responder. Decorators are applied in the order received, but their affect upon the +// request depends on whether they are a pre-decorator (react to the http.Response and then pass it +// along) or a post-decorator (pass the http.Response along and then react). +func DecorateResponder(r Responder, decorators ...RespondDecorator) Responder { + for _, decorate := range decorators { + r = decorate(r) + } + return r +} + +// Respond accepts an http.Response and a, possibly empty, set of RespondDecorators. +// It creates a Responder from the decorators it then applies to the passed http.Response. +func Respond(r *http.Response, decorators ...RespondDecorator) error { + if r == nil { + return nil + } + return CreateResponder(decorators...).Respond(r) +} + +// ByIgnoring returns a RespondDecorator that ignores the passed http.Response passing it unexamined +// to the next RespondDecorator. +func ByIgnoring() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + return r.Respond(resp) + }) + } +} + +// ByCopying copies the contents of the http.Response Body into the passed bytes.Buffer as +// the Body is read. +func ByCopying(b *bytes.Buffer) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil && resp != nil && resp.Body != nil { + resp.Body = TeeReadCloser(resp.Body, b) + } + return err + }) + } +} + +// ByDiscardingBody returns a RespondDecorator that first invokes the passed Responder after which +// it copies the remaining bytes (if any) in the response body to ioutil.Discard. Since the passed +// Responder is invoked prior to discarding the response body, the decorator may occur anywhere +// within the set. +func ByDiscardingBody() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil && resp != nil && resp.Body != nil { + if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil { + return fmt.Errorf("Error discarding the response body: %v", err) + } + } + return err + }) + } +} + +// ByClosing returns a RespondDecorator that first invokes the passed Responder after which it +// closes the response body. Since the passed Responder is invoked prior to closing the response +// body, the decorator may occur anywhere within the set. +func ByClosing() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if resp != nil && resp.Body != nil { + if err := resp.Body.Close(); err != nil { + return fmt.Errorf("Error closing the response body: %v", err) + } + } + return err + }) + } +} + +// ByClosingIfError returns a RespondDecorator that first invokes the passed Responder after which +// it closes the response if the passed Responder returns an error and the response body exists. 
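The response-side chain mirrors the refreshInternal usage in token.go above: check the status, unmarshal, then close, in that order. A sketch decoding a JSON body into an illustrative struct:

```go
package example

import (
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

// Widget is an illustrative payload shape.
type Widget struct {
	Name string `json:"name"`
}

// DecodeWidget fails unless the status is 200 OK, then unmarshals the
// body into w, and finally closes the body.
func DecodeWidget(resp *http.Response) (Widget, error) {
	var w Widget
	err := autorest.Respond(resp,
		autorest.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&w),
		autorest.ByClosing())
	return w, err
}
```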
+func ByClosingIfError() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err != nil && resp != nil && resp.Body != nil { + if err := resp.Body.Close(); err != nil { + return fmt.Errorf("Error closing the response body: %v", err) + } + } + return err + }) + } +} + +// ByUnmarshallingJSON returns a RespondDecorator that decodes a JSON document returned in the +// response Body into the value pointed to by v. +func ByUnmarshallingJSON(v interface{}) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil { + b, errInner := ioutil.ReadAll(resp.Body) + // Some responses might include a BOM, remove for successful unmarshalling + b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf")) + if errInner != nil { + err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) + } else if len(strings.Trim(string(b), " ")) > 0 { + errInner = json.Unmarshal(b, v) + if errInner != nil { + err = fmt.Errorf("Error occurred unmarshalling JSON - Error = '%v' JSON = '%s'", errInner, string(b)) + } + } + } + return err + }) + } +} + +// ByUnmarshallingXML returns a RespondDecorator that decodes a XML document returned in the +// response Body into the value pointed to by v. +func ByUnmarshallingXML(v interface{}) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil { + b, errInner := ioutil.ReadAll(resp.Body) + if errInner != nil { + err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) + } else { + errInner = xml.Unmarshal(b, v) + if errInner != nil { + err = fmt.Errorf("Error occurred unmarshalling Xml - Error = '%v' Xml = '%s'", errInner, string(b)) + } + } + } + return err + }) + } +} + +// WithErrorUnlessStatusCode returns a RespondDecorator that emits an error unless the response +// StatusCode is among the set passed. On error, response body is fully read into a buffer and +// presented in the returned error, as well as in the response body. +func WithErrorUnlessStatusCode(codes ...int) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil && !ResponseHasStatusCode(resp, codes...) { + derr := NewErrorWithResponse("autorest", "WithErrorUnlessStatusCode", resp, "%v %v failed with %s", + resp.Request.Method, + resp.Request.URL, + resp.Status) + if resp.Body != nil { + defer resp.Body.Close() + b, _ := ioutil.ReadAll(resp.Body) + derr.ServiceError = b + resp.Body = ioutil.NopCloser(bytes.NewReader(b)) + } + err = derr + } + return err + }) + } +} + +// WithErrorUnlessOK returns a RespondDecorator that emits an error if the response StatusCode is +// anything other than HTTP 200. +func WithErrorUnlessOK() RespondDecorator { + return WithErrorUnlessStatusCode(http.StatusOK) +} + +// ExtractHeader extracts all values of the specified header from the http.Response. It returns an +// empty string slice if the passed http.Response is nil or the header does not exist. +func ExtractHeader(header string, resp *http.Response) []string { + if resp != nil && resp.Header != nil { + return resp.Header[http.CanonicalHeaderKey(header)] + } + return nil +} + +// ExtractHeaderValue extracts the first value of the specified header from the http.Response. 
It +// returns an empty string if the passed http.Response is nil or the header does not exist. +func ExtractHeaderValue(header string, resp *http.Response) string { + h := ExtractHeader(header, resp) + if len(h) > 0 { + return h[0] + } + return "" +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender.go b/vendor/github.com/Azure/go-autorest/autorest/sender.go new file mode 100644 index 00000000..9c069781 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/sender.go @@ -0,0 +1,270 @@ +package autorest + +import ( + "bytes" + "fmt" + "io/ioutil" + "log" + "math" + "net/http" + "time" +) + +// Sender is the interface that wraps the Do method to send HTTP requests. +// +// The standard http.Client conforms to this interface. +type Sender interface { + Do(*http.Request) (*http.Response, error) +} + +// SenderFunc is a method that implements the Sender interface. +type SenderFunc func(*http.Request) (*http.Response, error) + +// Do implements the Sender interface on SenderFunc. +func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) { + return sf(r) +} + +// SendDecorator takes and possibily decorates, by wrapping, a Sender. Decorators may affect the +// http.Request and pass it along or, first, pass the http.Request along then react to the +// http.Response result. +type SendDecorator func(Sender) Sender + +// CreateSender creates, decorates, and returns, as a Sender, the default http.Client. +func CreateSender(decorators ...SendDecorator) Sender { + return DecorateSender(&http.Client{}, decorators...) +} + +// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which is applies to +// the Sender. Decorators are applied in the order received, but their affect upon the request +// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a +// post-decorator (pass the http.Request along and react to the results in http.Response). +func DecorateSender(s Sender, decorators ...SendDecorator) Sender { + for _, decorate := range decorators { + s = decorate(s) + } + return s +} + +// Send sends, by means of the default http.Client, the passed http.Request, returning the +// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which +// it will apply the http.Client before invoking the Do method. +// +// Send is a convenience method and not recommended for production. Advanced users should use +// SendWithSender, passing and sharing their own Sender (e.g., instance of http.Client). +// +// Send will not poll or retry requests. +func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) { + return SendWithSender(&http.Client{}, r, decorators...) +} + +// SendWithSender sends the passed http.Request, through the provided Sender, returning the +// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which +// it will apply the http.Client before invoking the Do method. +// +// SendWithSender will not poll or retry requests. +func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*http.Response, error) { + return DecorateSender(s, decorators...).Do(r) +} + +// AfterDelay returns a SendDecorator that delays for the passed time.Duration before +// invoking the Sender. The delay may be terminated by closing the optional channel on the +// http.Request. If canceled, no further Senders are invoked. 
+func AfterDelay(d time.Duration) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + if !DelayForBackoff(d, 0, r.Cancel) { + return nil, fmt.Errorf("autorest: AfterDelay canceled before full delay") + } + return s.Do(r) + }) + } +} + +// AsIs returns a SendDecorator that invokes the passed Sender without modifying the http.Request. +func AsIs() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + return s.Do(r) + }) + } +} + +// DoCloseIfError returns a SendDecorator that first invokes the passed Sender after which +// it closes the response if the passed Sender returns an error and the response body exists. +func DoCloseIfError() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err != nil { + Respond(resp, ByDiscardingBody(), ByClosing()) + } + return resp, err + }) + } +} + +// DoErrorIfStatusCode returns a SendDecorator that emits an error if the response StatusCode is +// among the set passed. Since these are artificial errors, the response body may still require +// closing. +func DoErrorIfStatusCode(codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err == nil && ResponseHasStatusCode(resp, codes...) { + err = NewErrorWithResponse("autorest", "DoErrorIfStatusCode", resp, "%v %v failed with %s", + resp.Request.Method, + resp.Request.URL, + resp.Status) + } + return resp, err + }) + } +} + +// DoErrorUnlessStatusCode returns a SendDecorator that emits an error unless the response +// StatusCode is among the set passed. Since these are artificial errors, the response body +// may still require closing. +func DoErrorUnlessStatusCode(codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err == nil && !ResponseHasStatusCode(resp, codes...) { + err = NewErrorWithResponse("autorest", "DoErrorUnlessStatusCode", resp, "%v %v failed with %s", + resp.Request.Method, + resp.Request.URL, + resp.Status) + } + return resp, err + }) + } +} + +// DoPollForStatusCodes returns a SendDecorator that polls if the http.Response contains one of the +// passed status codes. It expects the http.Response to contain a Location header providing the +// URL at which to poll (using GET) and will poll until the time passed is equal to or greater than +// the supplied duration. It will delay between requests for the duration specified in the +// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by +// closing the optional channel on the http.Request. +func DoPollForStatusCodes(duration time.Duration, delay time.Duration, codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + resp, err = s.Do(r) + + if err == nil && ResponseHasStatusCode(resp, codes...) { + r, err = NewPollingRequest(resp, r.Cancel) + + for err == nil && ResponseHasStatusCode(resp, codes...) 
{ + Respond(resp, + ByDiscardingBody(), + ByClosing()) + resp, err = SendWithSender(s, r, + AfterDelay(GetRetryAfter(resp, delay))) + } + } + + return resp, err + }) + } +} + +// DoRetryForAttempts returns a SendDecorator that retries a failed request for up to the specified +// number of attempts, exponentially backing off between requests using the supplied backoff +// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on +// the http.Request. +func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + for attempt := 0; attempt < attempts; attempt++ { + resp, err = s.Do(r) + if err == nil { + return resp, err + } + DelayForBackoff(backoff, attempt, r.Cancel) + } + return resp, err + }) + } +} + +// DoRetryForStatusCodes returns a SendDecorator that retries for specified statusCodes for up to the specified +// number of attempts, exponentially backing off between requests using the supplied backoff +// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on +// the http.Request. +func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + b := []byte{} + if r.Body != nil { + b, err = ioutil.ReadAll(r.Body) + if err != nil { + return resp, err + } + } + + // Increment to add the first call (attempts denotes number of retries) + attempts++ + for attempt := 0; attempt < attempts; attempt++ { + r.Body = ioutil.NopCloser(bytes.NewBuffer(b)) + resp, err = s.Do(r) + if err != nil || !ResponseHasStatusCode(resp, codes...) { + return resp, err + } + DelayForBackoff(backoff, attempt, r.Cancel) + } + return resp, err + }) + } +} + +// DoRetryForDuration returns a SendDecorator that retries the request until the total time is equal +// to or greater than the specified duration, exponentially backing off between requests using the +// supplied backoff time.Duration (which may be zero). Retrying may be canceled by closing the +// optional channel on the http.Request. +func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + end := time.Now().Add(d) + for attempt := 0; time.Now().Before(end); attempt++ { + resp, err = s.Do(r) + if err == nil { + return resp, err + } + DelayForBackoff(backoff, attempt, r.Cancel) + } + return resp, err + }) + } +} + +// WithLogging returns a SendDecorator that implements simple before and after logging of the +// request. +func WithLogging(logger *log.Logger) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + logger.Printf("Sending %s %s", r.Method, r.URL) + resp, err := s.Do(r) + if err != nil { + logger.Printf("%s %s received error '%v'", r.Method, r.URL, err) + } else { + logger.Printf("%s %s received %s", r.Method, r.URL, resp.Status) + } + return resp, err + }) + } +} + +// DelayForBackoff invokes time.After for the supplied backoff duration raised to the power of +// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can set +// to zero for no delay. The delay may be canceled by closing the passed channel. If terminated early, +// returns false. 
+// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based
+// attempt count.
+func DelayForBackoff(backoff time.Duration, attempt int, cancel <-chan struct{}) bool {
+	select {
+	case <-time.After(time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second):
+		return true
+	case <-cancel:
+		return false
+	}
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility.go b/vendor/github.com/Azure/go-autorest/autorest/utility.go
new file mode 100644
index 00000000..78067148
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/utility.go
@@ -0,0 +1,178 @@
+package autorest
+
+import (
+	"bytes"
+	"encoding/json"
+	"encoding/xml"
+	"fmt"
+	"io"
+	"net/url"
+	"reflect"
+	"sort"
+	"strings"
+)
+
+// EncodedAs is a series of constants specifying various data encodings
+type EncodedAs string
+
+const (
+	// EncodedAsJSON states that data is encoded as JSON
+	EncodedAsJSON EncodedAs = "JSON"
+
+	// EncodedAsXML states that data is encoded as XML
+	EncodedAsXML EncodedAs = "XML"
+)
+
+// Decoder defines the decoding method json.Decoder and xml.Decoder share
+type Decoder interface {
+	Decode(v interface{}) error
+}
+
+// NewDecoder creates a new decoder appropriate to the passed encoding.
+// encodedAs specifies the type of encoding and r supplies the io.Reader containing the
+// encoded data.
+func NewDecoder(encodedAs EncodedAs, r io.Reader) Decoder {
+	if encodedAs == EncodedAsJSON {
+		return json.NewDecoder(r)
+	} else if encodedAs == EncodedAsXML {
+		return xml.NewDecoder(r)
+	}
+	return nil
+}
+
+// CopyAndDecode decodes the data from the passed io.Reader while making a copy. Having a copy
+// is especially useful if there is a chance the data will fail to decode.
+// encodedAs specifies the expected encoding, r provides the io.Reader to the data, and v
+// is the decoding destination.
+func CopyAndDecode(encodedAs EncodedAs, r io.Reader, v interface{}) (bytes.Buffer, error) {
+	b := bytes.Buffer{}
+	return b, NewDecoder(encodedAs, io.TeeReader(r, &b)).Decode(v)
+}
+
+// TeeReadCloser returns a ReadCloser that writes to w what it reads from rc.
+// It utilizes io.TeeReader to copy the data read and has the same behavior when reading.
+// Further, when it is closed, it ensures that rc is closed as well.
+func TeeReadCloser(rc io.ReadCloser, w io.Writer) io.ReadCloser {
+	return &teeReadCloser{rc, io.TeeReader(rc, w)}
+}
+
+type teeReadCloser struct {
+	rc io.ReadCloser
+	r  io.Reader
+}
+
+func (t *teeReadCloser) Read(p []byte) (int, error) {
+	return t.r.Read(p)
+}
+
+func (t *teeReadCloser) Close() error {
+	return t.rc.Close()
+}
+
+func containsInt(ints []int, n int) bool {
+	for _, i := range ints {
+		if i == n {
+			return true
+		}
+	}
+	return false
+}
+
+func escapeValueStrings(m map[string]string) map[string]string {
+	for key, value := range m {
+		m[key] = url.QueryEscape(value)
+	}
+	return m
+}
+
+func ensureValueStrings(mapOfInterface map[string]interface{}) map[string]string {
+	mapOfStrings := make(map[string]string)
+	for key, value := range mapOfInterface {
+		mapOfStrings[key] = ensureValueString(value)
+	}
+	return mapOfStrings
+}
+
+func ensureValueString(value interface{}) string {
+	if value == nil {
+		return ""
+	}
+	switch v := value.(type) {
+	case string:
+		return v
+	case []byte:
+		return string(v)
+	default:
+		return fmt.Sprintf("%v", v)
+	}
+}
+
+// MapToValues method converts map[string]interface{} to url.Values.
+func MapToValues(m map[string]interface{}) url.Values {
+	v := url.Values{}
+	for key, value := range m {
+		x := reflect.ValueOf(value)
+		if x.Kind() == reflect.Array || x.Kind() == reflect.Slice {
+			for i := 0; i < x.Len(); i++ {
+				v.Add(key, ensureValueString(x.Index(i)))
+			}
+		} else {
+			v.Add(key, ensureValueString(value))
+		}
+	}
+	return v
+}
+
+// String method converts interface v to string. If the interface is a list, it
+// joins the list elements using the separator.
+func String(v interface{}, sep ...string) string {
+	if len(sep) > 0 {
+		return ensureValueString(strings.Join(v.([]string), sep[0]))
+	}
+	return ensureValueString(v)
+}
+
+// Encode method encodes URL path and query parameters.
+func Encode(location string, v interface{}, sep ...string) string {
+	s := String(v, sep...)
+	switch strings.ToLower(location) {
+	case "path":
+		return pathEscape(s)
+	case "query":
+		return queryEscape(s)
+	default:
+		return s
+	}
+}
+
+func pathEscape(s string) string {
+	return strings.Replace(url.QueryEscape(s), "+", "%20", -1)
+}
+
+func queryEscape(s string) string {
+	return url.QueryEscape(s)
+}
+
+// This method is the same as the Encode() method of the "net/url" Go package,
+// except it does not encode the query parameters because they
+// already come encoded. It formats the values map in query format (bar=foo&a=b).
+func createQuery(v url.Values) string {
+	var buf bytes.Buffer
+	keys := make([]string, 0, len(v))
+	for k := range v {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+	for _, k := range keys {
+		vs := v[k]
+		prefix := url.QueryEscape(k) + "="
+		for _, v := range vs {
+			if buf.Len() > 0 {
+				buf.WriteByte('&')
+			}
+			buf.WriteString(prefix)
+			buf.WriteString(v)
+		}
+	}
+	return buf.String()
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/Azure/go-autorest/autorest/version.go
new file mode 100644
index 00000000..7a0bf9c9
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/version.go
@@ -0,0 +1,23 @@
+package autorest
+
+import (
+	"fmt"
+)
+
+const (
+	major        = "7"
+	minor        = "3"
+	patch        = "0"
+	tag          = ""
+	semVerFormat = "%s.%s.%s%s"
+)
+
+var version string
+
+// Version returns the semantic version (see http://semver.org).
+func Version() string {
+	if version == "" {
+		version = fmt.Sprintf(semVerFormat, major, minor, patch, tag)
+	}
+	return version
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/LICENSE b/vendor/github.com/dgrijalva/jwt-go/LICENSE
new file mode 100644
index 00000000..df83a9c2
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/LICENSE
@@ -0,0 +1,8 @@
+Copyright (c) 2012 Dave Grijalva
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
diff --git a/vendor/github.com/dgrijalva/jwt-go/README.md b/vendor/github.com/dgrijalva/jwt-go/README.md
new file mode 100644
index 00000000..f48365fa
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/README.md
@@ -0,0 +1,85 @@
+A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html)
+
+[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go)
+
+**BREAKING CHANGES:** Version 3.0.0 is here. It includes _a lot_ of changes, including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code.
+
+**NOTICE:** A vulnerability in JWT was [recently published](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). As this library doesn't force users to validate the `alg` is what they expected, it's possible your usage is affected. There will be an update soon to remedy this, and it will likely require backwards-incompatible changes to the API. In the short term, please make sure your implementation verifies the `alg` is what you expect.
+
+
+## What the heck is a JWT?
+
+JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens.
+
+In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in OAuth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way.
+
+The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which signing method was used and what key was used.
+
+The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-jones-json-web-token.html) for information about reserved keys and the proper way to add your own.
+
+## What's in the box?
+
+This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own.
+
+## Examples
+
+See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage:
+
+* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-Parse--Hmac)
+* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-New--Hmac)
+* [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples)
+
+## Extensions
+
+This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`.
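+
+As a rough, minimal sketch (the `NOOP` method and `mysigner` package below are invented for illustration; a method that accepts any signature must never be used for real tokens), a custom signing method might look like this:
+
+```go
+package mysigner
+
+import jwt "github.com/dgrijalva/jwt-go"
+
+// methodNoop is a hypothetical, do-nothing signing method shown only to
+// illustrate the extension points; it is not part of this library.
+type methodNoop struct{}
+
+// Alg returns the "alg" header value this method registers under.
+func (m *methodNoop) Alg() string { return "NOOP" }
+
+// Sign returns an empty signature for every signing string.
+func (m *methodNoop) Sign(signingString string, key interface{}) (string, error) {
+	return "", nil
+}
+
+// Verify accepts any signature, which is why this method is unsafe anywhere
+// outside of examples and tests.
+func (m *methodNoop) Verify(signingString, signature string, key interface{}) error {
+	return nil
+}
+
+func init() {
+	// Register a factory so jwt.GetSigningMethod("NOOP") can find the method.
+	jwt.RegisterSigningMethod("NOOP", func() jwt.SigningMethod {
+		return &methodNoop{}
+	})
+}
+```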
+
+Here's an example of an extension that integrates with the Google App Engine signing tools: https://github.com/someone1/gcp-jwt-go
+
+## Compliance
+
+This library was last reviewed to comply with [RFC 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences:
+
+* In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key.
+
+## Project Status & Versioning
+
+This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason).
+
+This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases).
+
+While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package instead: `gopkg.in/dgrijalva/jwt-go.v2`. It will do the right thing WRT semantic versioning.
+
+## Usage Tips
+
+### Signing vs Encryption
+
+A token is simply a JSON object that is signed by its author. This tells you exactly two things about the data:
+
+* The author of the token was in possession of the signing secret
+* The data has not been modified since it was signed
+
+It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library.
+
+### Choosing a Signing Method
+
+There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric.
+
+Symmetric signing methods, such as HMAC SHA, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly faster to compute, though this is rarely enough to matter. Symmetric signing methods work best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation.
+
+Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification.
+
+### JWT and OAuth
+
+It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication.
+
+Without going too far down the rabbit hole, here's a description of the interaction of these technologies:
+
+* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth.
+* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token.
+* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL.
+
+## More
+
+Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go).
+
+The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation.
diff --git a/vendor/github.com/dgrijalva/jwt-go/claims.go b/vendor/github.com/dgrijalva/jwt-go/claims.go
new file mode 100644
index 00000000..f0228f02
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/claims.go
@@ -0,0 +1,134 @@
+package jwt
+
+import (
+	"crypto/subtle"
+	"fmt"
+	"time"
+)
+
+// For a type to be a Claims object, it must just have a Valid method that determines
+// if the token is invalid for any supported reason
+type Claims interface {
+	Valid() error
+}
+
+// Structured version of Claims Section, as referenced at
+// https://tools.ietf.org/html/rfc7519#section-4.1
+// See examples for how to use this with your own claim types
+type StandardClaims struct {
+	Audience  string `json:"aud,omitempty"`
+	ExpiresAt int64  `json:"exp,omitempty"`
+	Id        string `json:"jti,omitempty"`
+	IssuedAt  int64  `json:"iat,omitempty"`
+	Issuer    string `json:"iss,omitempty"`
+	NotBefore int64  `json:"nbf,omitempty"`
+	Subject   string `json:"sub,omitempty"`
+}
+
+// Validates time-based claims "exp, iat, nbf".
+// There is no accounting for clock skew.
+// As well, if any of the above claims are not in the token, the token will still
+// be considered valid.
+func (c StandardClaims) Valid() error {
+	vErr := new(ValidationError)
+	now := TimeFunc().Unix()
+
+	// The claims below are optional, by default, so if they are set to the
+	// default value in Go, let's not fail the verification for them.
+	if c.VerifyExpiresAt(now, false) == false {
+		delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0))
+		vErr.Inner = fmt.Errorf("token is expired by %v", delta)
+		vErr.Errors |= ValidationErrorExpired
+	}
+
+	if c.VerifyIssuedAt(now, false) == false {
+		vErr.Inner = fmt.Errorf("Token used before issued")
+		vErr.Errors |= ValidationErrorIssuedAt
+	}
+
+	if c.VerifyNotBefore(now, false) == false {
+		vErr.Inner = fmt.Errorf("token is not valid yet")
+		vErr.Errors |= ValidationErrorNotValidYet
+	}
+
+	if vErr.valid() {
+		return nil
+	}
+
+	return vErr
+}
+
+// Compares the aud claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool {
+	return verifyAud(c.Audience, cmp, req)
+}
+
+// Compares the exp claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool {
+	return verifyExp(c.ExpiresAt, cmp, req)
+}
+
+// Compares the iat claim against cmp.
+// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool { + return verifyIat(c.IssuedAt, cmp, req) +} + +// Compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool { + return verifyIss(c.Issuer, cmp, req) +} + +// Compares the nbf claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool { + return verifyNbf(c.NotBefore, cmp, req) +} + +// ----- helpers + +func verifyAud(aud string, cmp string, required bool) bool { + if aud == "" { + return !required + } + if subtle.ConstantTimeCompare([]byte(aud), []byte(cmp)) != 0 { + return true + } else { + return false + } +} + +func verifyExp(exp int64, now int64, required bool) bool { + if exp == 0 { + return !required + } + return now <= exp +} + +func verifyIat(iat int64, now int64, required bool) bool { + if iat == 0 { + return !required + } + return now >= iat +} + +func verifyIss(iss string, cmp string, required bool) bool { + if iss == "" { + return !required + } + if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 { + return true + } else { + return false + } +} + +func verifyNbf(nbf int64, now int64, required bool) bool { + if nbf == 0 { + return !required + } + return now >= nbf +} diff --git a/vendor/github.com/dgrijalva/jwt-go/doc.go b/vendor/github.com/dgrijalva/jwt-go/doc.go new file mode 100644 index 00000000..a86dc1a3 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/doc.go @@ -0,0 +1,4 @@ +// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html +// +// See README.md for more info. 
+package jwt
diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa.go b/vendor/github.com/dgrijalva/jwt-go/ecdsa.go
new file mode 100644
index 00000000..2f59a222
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/ecdsa.go
@@ -0,0 +1,147 @@
+package jwt
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/rand"
+	"errors"
+	"math/big"
+)
+
+var (
+	// Sadly this is missing from crypto/ecdsa compared to crypto/rsa
+	ErrECDSAVerification = errors.New("crypto/ecdsa: verification error")
+)
+
+// Implements the ECDSA family of signing methods
+type SigningMethodECDSA struct {
+	Name      string
+	Hash      crypto.Hash
+	KeySize   int
+	CurveBits int
+}
+
+// Specific instances for ES256 and company
+var (
+	SigningMethodES256 *SigningMethodECDSA
+	SigningMethodES384 *SigningMethodECDSA
+	SigningMethodES512 *SigningMethodECDSA
+)
+
+func init() {
+	// ES256
+	SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256}
+	RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod {
+		return SigningMethodES256
+	})
+
+	// ES384
+	SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384}
+	RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod {
+		return SigningMethodES384
+	})
+
+	// ES512
+	SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521}
+	RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod {
+		return SigningMethodES512
+	})
+}
+
+func (m *SigningMethodECDSA) Alg() string {
+	return m.Name
+}
+
+// Implements the Verify method from SigningMethod
+// For this verify method, key must be an ecdsa.PublicKey struct
+func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error {
+	var err error
+
+	// Decode the signature
+	var sig []byte
+	if sig, err = DecodeSegment(signature); err != nil {
+		return err
+	}
+
+	// Get the key
+	var ecdsaKey *ecdsa.PublicKey
+	switch k := key.(type) {
+	case *ecdsa.PublicKey:
+		ecdsaKey = k
+	default:
+		return ErrInvalidKeyType
+	}
+
+	if len(sig) != 2*m.KeySize {
+		return ErrECDSAVerification
+	}
+
+	r := big.NewInt(0).SetBytes(sig[:m.KeySize])
+	s := big.NewInt(0).SetBytes(sig[m.KeySize:])
+
+	// Create hasher
+	if !m.Hash.Available() {
+		return ErrHashUnavailable
+	}
+	hasher := m.Hash.New()
+	hasher.Write([]byte(signingString))
+
+	// Verify the signature
+	if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus == true {
+		return nil
+	} else {
+		return ErrECDSAVerification
+	}
+}
+
+// Implements the Sign method from SigningMethod
+// For this signing method, key must be an ecdsa.PrivateKey struct
+func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) {
+	// Get the key
+	var ecdsaKey *ecdsa.PrivateKey
+	switch k := key.(type) {
+	case *ecdsa.PrivateKey:
+		ecdsaKey = k
+	default:
+		return "", ErrInvalidKeyType
+	}
+
+	// Create the hasher
+	if !m.Hash.Available() {
+		return "", ErrHashUnavailable
+	}
+
+	hasher := m.Hash.New()
+	hasher.Write([]byte(signingString))
+
+	// Sign the string and return r, s
+	if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil {
+		curveBits := ecdsaKey.Curve.Params().BitSize
+
+		if m.CurveBits != curveBits {
+			return "", ErrInvalidKey
+		}
+
+		keyBytes := curveBits / 8
+		if curveBits%8 > 0 {
+			keyBytes += 1
+		}
+
+		// We serialize the outputs (r and s) into big-endian byte arrays and pad
+		// them with zeros on the left to make sure the sizes work out.
Both arrays + // must be keyBytes long, and the output must be 2*keyBytes long. + rBytes := r.Bytes() + rBytesPadded := make([]byte, keyBytes) + copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) + + sBytes := s.Bytes() + sBytesPadded := make([]byte, keyBytes) + copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) + + out := append(rBytesPadded, sBytesPadded...) + + return EncodeSegment(out), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go new file mode 100644 index 00000000..d19624b7 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go @@ -0,0 +1,67 @@ +package jwt + +import ( + "crypto/ecdsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrNotECPublicKey = errors.New("Key is not a valid ECDSA public key") + ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key") +) + +// Parse PEM encoded Elliptic Curve Private Key Structure +func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil { + return nil, err + } + + var pkey *ecdsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok { + return nil, ErrNotECPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKCS1 or PKCS8 public key +func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *ecdsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok { + return nil, ErrNotECPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/dgrijalva/jwt-go/errors.go b/vendor/github.com/dgrijalva/jwt-go/errors.go new file mode 100644 index 00000000..1c93024a --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/errors.go @@ -0,0 +1,59 @@ +package jwt + +import ( + "errors" +) + +// Error constants +var ( + ErrInvalidKey = errors.New("key is invalid") + ErrInvalidKeyType = errors.New("key is of invalid type") + ErrHashUnavailable = errors.New("the requested hash function is unavailable") +) + +// The errors that might occur when parsing and validating a token +const ( + ValidationErrorMalformed uint32 = 1 << iota // Token is malformed + ValidationErrorUnverifiable // Token could not be verified because of signing problems + ValidationErrorSignatureInvalid // Signature validation failed + + // Standard Claim validation errors + ValidationErrorAudience // AUD validation failed + ValidationErrorExpired // EXP validation failed + ValidationErrorIssuedAt // IAT validation failed + ValidationErrorIssuer // ISS validation failed + ValidationErrorNotValidYet // NBF validation failed + ValidationErrorId // JTI validation failed + ValidationErrorClaimsInvalid // Generic claims validation error +) + +// Helper for constructing a ValidationError with a string error message +func NewValidationError(errorText string, errorFlags uint32) *ValidationError { + 
return &ValidationError{
+		text:   errorText,
+		Errors: errorFlags,
+	}
+}
+
+// The error from Parse if token is not valid
+type ValidationError struct {
+	Inner  error  // stores the error returned by external dependencies, i.e.: KeyFunc
+	Errors uint32 // bitfield. see ValidationError... constants
+	text   string // errors that do not have a valid error just have text
+}
+
+// Validation error is an error type
+func (e ValidationError) Error() string {
+	if e.Inner != nil {
+		return e.Inner.Error()
+	} else if e.text != "" {
+		return e.text
+	} else {
+		return "token is invalid"
+	}
+}
+
+// No errors
+func (e *ValidationError) valid() bool {
+	return e.Errors == 0
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/hmac.go b/vendor/github.com/dgrijalva/jwt-go/hmac.go
new file mode 100644
index 00000000..c2299192
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/hmac.go
@@ -0,0 +1,94 @@
+package jwt
+
+import (
+	"crypto"
+	"crypto/hmac"
+	"errors"
+)
+
+// Implements the HMAC-SHA family of signing methods
+type SigningMethodHMAC struct {
+	Name string
+	Hash crypto.Hash
+}
+
+// Specific instances for HS256 and company
+var (
+	SigningMethodHS256  *SigningMethodHMAC
+	SigningMethodHS384  *SigningMethodHMAC
+	SigningMethodHS512  *SigningMethodHMAC
+	ErrSignatureInvalid = errors.New("signature is invalid")
+)
+
+func init() {
+	// HS256
+	SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256}
+	RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod {
+		return SigningMethodHS256
+	})
+
+	// HS384
+	SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384}
+	RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod {
+		return SigningMethodHS384
+	})
+
+	// HS512
+	SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512}
+	RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod {
+		return SigningMethodHS512
+	})
+}
+
+func (m *SigningMethodHMAC) Alg() string {
+	return m.Name
+}
+
+// Verify the signature of HSXXX tokens. Returns nil if the signature is valid.
+func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error {
+	// Verify the key is the right type
+	keyBytes, ok := key.([]byte)
+	if !ok {
+		return ErrInvalidKeyType
+	}
+
+	// Decode signature, for comparison
+	sig, err := DecodeSegment(signature)
+	if err != nil {
+		return err
+	}
+
+	// Can we use the specified hashing method?
+	if !m.Hash.Available() {
+		return ErrHashUnavailable
+	}
+
+	// This signing method is symmetric, so we validate the signature
+	// by reproducing the signature from the signing string and key, then
+	// comparing that against the provided signature.
+	hasher := hmac.New(m.Hash.New, keyBytes)
+	hasher.Write([]byte(signingString))
+	if !hmac.Equal(sig, hasher.Sum(nil)) {
+		return ErrSignatureInvalid
+	}
+
+	// No validation errors. Signature is good.
+	return nil
+}
+
+// Implements the Sign method from SigningMethod for this signing method.
+// Key must be []byte
+func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) {
+	if keyBytes, ok := key.([]byte); ok {
+		if !m.Hash.Available() {
+			return "", ErrHashUnavailable
+		}
+
+		hasher := hmac.New(m.Hash.New, keyBytes)
+		hasher.Write([]byte(signingString))
+
+		return EncodeSegment(hasher.Sum(nil)), nil
+	}
+
+	return "", ErrInvalidKey
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/map_claims.go b/vendor/github.com/dgrijalva/jwt-go/map_claims.go
new file mode 100644
index 00000000..291213c4
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/map_claims.go
@@ -0,0 +1,94 @@
+package jwt
+
+import (
+	"encoding/json"
+	"errors"
+	// "fmt"
+)
+
+// Claims type that uses the map[string]interface{} for JSON decoding
+// This is the default claims type if you don't supply one
+type MapClaims map[string]interface{}
+
+// Compares the aud claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyAudience(cmp string, req bool) bool {
+	aud, _ := m["aud"].(string)
+	return verifyAud(aud, cmp, req)
+}
+
+// Compares the exp claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool {
+	switch exp := m["exp"].(type) {
+	case float64:
+		return verifyExp(int64(exp), cmp, req)
+	case json.Number:
+		v, _ := exp.Int64()
+		return verifyExp(v, cmp, req)
+	}
+	return req == false
+}
+
+// Compares the iat claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool {
+	switch iat := m["iat"].(type) {
+	case float64:
+		return verifyIat(int64(iat), cmp, req)
+	case json.Number:
+		v, _ := iat.Int64()
+		return verifyIat(v, cmp, req)
+	}
+	return req == false
+}
+
+// Compares the iss claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyIssuer(cmp string, req bool) bool {
+	iss, _ := m["iss"].(string)
+	return verifyIss(iss, cmp, req)
+}
+
+// Compares the nbf claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool {
+	switch nbf := m["nbf"].(type) {
+	case float64:
+		return verifyNbf(int64(nbf), cmp, req)
+	case json.Number:
+		v, _ := nbf.Int64()
+		return verifyNbf(v, cmp, req)
+	}
+	return req == false
+}
+
+// Validates time-based claims "exp, iat, nbf".
+// There is no accounting for clock skew.
+// As well, if any of the above claims are not in the token, the token will still
+// be considered valid.
+func (m MapClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc().Unix() + + if m.VerifyExpiresAt(now, false) == false { + vErr.Inner = errors.New("Token is expired") + vErr.Errors |= ValidationErrorExpired + } + + if m.VerifyIssuedAt(now, false) == false { + vErr.Inner = errors.New("Token used before issued") + vErr.Errors |= ValidationErrorIssuedAt + } + + if m.VerifyNotBefore(now, false) == false { + vErr.Inner = errors.New("Token is not valid yet") + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} diff --git a/vendor/github.com/dgrijalva/jwt-go/none.go b/vendor/github.com/dgrijalva/jwt-go/none.go new file mode 100644 index 00000000..f04d189d --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/none.go @@ -0,0 +1,52 @@ +package jwt + +// Implements the none signing method. This is required by the spec +// but you probably should never use it. +var SigningMethodNone *signingMethodNone + +const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed" + +var NoneSignatureTypeDisallowedError error + +type signingMethodNone struct{} +type unsafeNoneMagicConstant string + +func init() { + SigningMethodNone = &signingMethodNone{} + NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid) + + RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod { + return SigningMethodNone + }) +} + +func (m *signingMethodNone) Alg() string { + return "none" +} + +// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) { + // Key must be UnsafeAllowNoneSignatureType to prevent accidentally + // accepting 'none' signing method + if _, ok := key.(unsafeNoneMagicConstant); !ok { + return NoneSignatureTypeDisallowedError + } + // If signing method is none, signature must be an empty string + if signature != "" { + return NewValidationError( + "'none' signing method with non-empty signature", + ValidationErrorSignatureInvalid, + ) + } + + // Accept 'none' signing method. + return nil +} + +// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) { + if _, ok := key.(unsafeNoneMagicConstant); ok { + return "", nil + } + return "", NoneSignatureTypeDisallowedError +} diff --git a/vendor/github.com/dgrijalva/jwt-go/parser.go b/vendor/github.com/dgrijalva/jwt-go/parser.go new file mode 100644 index 00000000..7bf1c4ea --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/parser.go @@ -0,0 +1,131 @@ +package jwt + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" +) + +type Parser struct { + ValidMethods []string // If populated, only these methods will be considered valid + UseJSONNumber bool // Use JSON Number format in JSON decoder + SkipClaimsValidation bool // Skip claims validation during token parsing +} + +// Parse, validate, and return a token. +// keyFunc will receive the parsed token and should return the key for validating. 
+// If everything is kosher, err will be nil +func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc) +} + +func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { + parts := strings.Split(tokenString, ".") + if len(parts) != 3 { + return nil, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed) + } + + var err error + token := &Token{Raw: tokenString} + + // parse Header + var headerBytes []byte + if headerBytes, err = DecodeSegment(parts[0]); err != nil { + if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") { + return token, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed) + } + return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + if err = json.Unmarshal(headerBytes, &token.Header); err != nil { + return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // parse Claims + var claimBytes []byte + token.Claims = claims + + if claimBytes, err = DecodeSegment(parts[1]); err != nil { + return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) + if p.UseJSONNumber { + dec.UseNumber() + } + // JSON Decode. Special case for map type to avoid weird pointer behavior + if c, ok := token.Claims.(MapClaims); ok { + err = dec.Decode(&c) + } else { + err = dec.Decode(&claims) + } + // Handle decode error + if err != nil { + return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // Lookup signature method + if method, ok := token.Header["alg"].(string); ok { + if token.Method = GetSigningMethod(method); token.Method == nil { + return token, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable) + } + } else { + return token, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable) + } + + // Verify signing method is in the required set + if p.ValidMethods != nil { + var signingMethodValid = false + var alg = token.Method.Alg() + for _, m := range p.ValidMethods { + if m == alg { + signingMethodValid = true + break + } + } + if !signingMethodValid { + // signing method is not in the listed set + return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid) + } + } + + // Lookup key + var key interface{} + if keyFunc == nil { + // keyFunc was not provided. 
Short-circuiting validation
+		return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable)
+	}
+	if key, err = keyFunc(token); err != nil {
+		// keyFunc returned an error
+		return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable}
+	}
+
+	vErr := &ValidationError{}
+
+	// Validate Claims
+	if !p.SkipClaimsValidation {
+		if err := token.Claims.Valid(); err != nil {
+
+			// If the Claims Valid returned an error, check if it is a validation error,
+			// If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set
+			if e, ok := err.(*ValidationError); !ok {
+				vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid}
+			} else {
+				vErr = e
+			}
+		}
+	}
+
+	// Perform validation
+	token.Signature = parts[2]
+	if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil {
+		vErr.Inner = err
+		vErr.Errors |= ValidationErrorSignatureInvalid
+	}
+
+	if vErr.valid() {
+		token.Valid = true
+		return token, nil
+	}
+
+	return token, vErr
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa.go b/vendor/github.com/dgrijalva/jwt-go/rsa.go
new file mode 100644
index 00000000..0ae0b198
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/rsa.go
@@ -0,0 +1,100 @@
+package jwt
+
+import (
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+)
+
+// Implements the RSA family of signing methods
+type SigningMethodRSA struct {
+	Name string
+	Hash crypto.Hash
+}
+
+// Specific instances for RS256 and company
+var (
+	SigningMethodRS256 *SigningMethodRSA
+	SigningMethodRS384 *SigningMethodRSA
+	SigningMethodRS512 *SigningMethodRSA
+)
+
+func init() {
+	// RS256
+	SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256}
+	RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod {
+		return SigningMethodRS256
+	})
+
+	// RS384
+	SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384}
+	RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod {
+		return SigningMethodRS384
+	})
+
+	// RS512
+	SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512}
+	RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod {
+		return SigningMethodRS512
+	})
+}
+
+func (m *SigningMethodRSA) Alg() string {
+	return m.Name
+}
+
+// Implements the Verify method from SigningMethod
+// For this signing method, key must be an rsa.PublicKey structure.
+func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error {
+	var err error
+
+	// Decode the signature
+	var sig []byte
+	if sig, err = DecodeSegment(signature); err != nil {
+		return err
+	}
+
+	var rsaKey *rsa.PublicKey
+	var ok bool
+
+	if rsaKey, ok = key.(*rsa.PublicKey); !ok {
+		return ErrInvalidKeyType
+	}
+
+	// Create hasher
+	if !m.Hash.Available() {
+		return ErrHashUnavailable
+	}
+	hasher := m.Hash.New()
+	hasher.Write([]byte(signingString))
+
+	// Verify the signature
+	return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig)
+}
+
+// Implements the Sign method from SigningMethod
+// For this signing method, key must be an rsa.PrivateKey structure.
+func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) {
+	var rsaKey *rsa.PrivateKey
+	var ok bool
+
+	// Validate type of key
+	if rsaKey, ok = key.(*rsa.PrivateKey); !ok {
+		return "", ErrInvalidKey
+	}
+
+	// Create the hasher
+	if !m.Hash.Available() {
+		return "", ErrHashUnavailable
+	}
+
+	hasher := m.Hash.New()
+	hasher.Write([]byte(signingString))
+
+	// Sign the string and return the encoded bytes
+	if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil {
+		return EncodeSegment(sigBytes), nil
+	} else {
+		return "", err
+	}
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go b/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go
new file mode 100644
index 00000000..10ee9db8
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go
@@ -0,0 +1,126 @@
+// +build go1.4
+
+package jwt
+
+import (
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+)
+
+// Implements the RSAPSS family of signing methods
+type SigningMethodRSAPSS struct {
+	*SigningMethodRSA
+	Options *rsa.PSSOptions
+}
+
+// Specific instances for RS/PS and company
+var (
+	SigningMethodPS256 *SigningMethodRSAPSS
+	SigningMethodPS384 *SigningMethodRSAPSS
+	SigningMethodPS512 *SigningMethodRSAPSS
+)
+
+func init() {
+	// PS256
+	SigningMethodPS256 = &SigningMethodRSAPSS{
+		&SigningMethodRSA{
+			Name: "PS256",
+			Hash: crypto.SHA256,
+		},
+		&rsa.PSSOptions{
+			SaltLength: rsa.PSSSaltLengthAuto,
+			Hash:       crypto.SHA256,
+		},
+	}
+	RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod {
+		return SigningMethodPS256
+	})
+
+	// PS384
+	SigningMethodPS384 = &SigningMethodRSAPSS{
+		&SigningMethodRSA{
+			Name: "PS384",
+			Hash: crypto.SHA384,
+		},
+		&rsa.PSSOptions{
+			SaltLength: rsa.PSSSaltLengthAuto,
+			Hash:       crypto.SHA384,
+		},
+	}
+	RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod {
+		return SigningMethodPS384
+	})
+
+	// PS512
+	SigningMethodPS512 = &SigningMethodRSAPSS{
+		&SigningMethodRSA{
+			Name: "PS512",
+			Hash: crypto.SHA512,
+		},
+		&rsa.PSSOptions{
+			SaltLength: rsa.PSSSaltLengthAuto,
+			Hash:       crypto.SHA512,
+		},
+	}
+	RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod {
+		return SigningMethodPS512
+	})
+}
+
+// Implements the Verify method from SigningMethod
+// For this verify method, key must be an rsa.PublicKey struct
+func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error {
+	var err error
+
+	// Decode the signature
+	var sig []byte
+	if sig, err = DecodeSegment(signature); err != nil {
+		return err
+	}
+
+	var rsaKey *rsa.PublicKey
+	switch k := key.(type) {
+	case *rsa.PublicKey:
+		rsaKey = k
+	default:
+		return ErrInvalidKey
+	}
+
+	// Create hasher
+	if !m.Hash.Available() {
+		return ErrHashUnavailable
+	}
+	hasher := m.Hash.New()
+	hasher.Write([]byte(signingString))
+
+	return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, m.Options)
+}
+
+// Implements the Sign method from SigningMethod
+// For this signing method, key must be an rsa.PrivateKey struct
+func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) {
+	var rsaKey *rsa.PrivateKey
+
+	switch k := key.(type) {
+	case *rsa.PrivateKey:
+		rsaKey = k
+	default:
+		return "", ErrInvalidKeyType
+	}
+
+	// Create the hasher
+	if !m.Hash.Available() {
+		return "", ErrHashUnavailable
+	}
+
+	hasher := m.Hash.New()
+	hasher.Write([]byte(signingString))
+
+	// Sign the string and return the encoded bytes
+	if sigBytes, err := rsa.SignPSS(rand.Reader,
rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil { + return EncodeSegment(sigBytes), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go new file mode 100644 index 00000000..213a90db --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go @@ -0,0 +1,69 @@ +package jwt + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be PEM encoded PKCS1 or PKCS8 private key") + ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key") + ErrNotRSAPublicKey = errors.New("Key is not a valid RSA public key") +) + +// Parse PEM encoded PKCS1 or PKCS8 private key +func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKCS1 or PKCS8 public key +func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *rsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PublicKey); !ok { + return nil, ErrNotRSAPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/dgrijalva/jwt-go/signing_method.go b/vendor/github.com/dgrijalva/jwt-go/signing_method.go new file mode 100644 index 00000000..ed1f212b --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/signing_method.go @@ -0,0 +1,35 @@ +package jwt + +import ( + "sync" +) + +var signingMethods = map[string]func() SigningMethod{} +var signingMethodLock = new(sync.RWMutex) + +// Implement SigningMethod to add new methods for signing or verifying tokens. +type SigningMethod interface { + Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid + Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error + Alg() string // returns the alg identifier for this method (example: 'HS256') +} + +// Register the "alg" name and a factory function for signing method. 
+// This is typically done during init() in the method's implementation +func RegisterSigningMethod(alg string, f func() SigningMethod) { + signingMethodLock.Lock() + defer signingMethodLock.Unlock() + + signingMethods[alg] = f +} + +// Get a signing method from an "alg" string +func GetSigningMethod(alg string) (method SigningMethod) { + signingMethodLock.RLock() + defer signingMethodLock.RUnlock() + + if methodF, ok := signingMethods[alg]; ok { + method = methodF() + } + return +} diff --git a/vendor/github.com/dgrijalva/jwt-go/token.go b/vendor/github.com/dgrijalva/jwt-go/token.go new file mode 100644 index 00000000..d637e086 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/token.go @@ -0,0 +1,108 @@ +package jwt + +import ( + "encoding/base64" + "encoding/json" + "strings" + "time" +) + +// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time). +// You can override it to use another time value. This is useful for testing or if your +// server uses a different time zone than your tokens. +var TimeFunc = time.Now + +// Parse methods use this callback function to supply +// the key for verification. The function receives the parsed, +// but unverified Token. This allows you to use properties in the +// Header of the token (such as `kid`) to identify which key to use. +type Keyfunc func(*Token) (interface{}, error) + +// A JWT Token. Different fields will be used depending on whether you're +// creating or parsing/verifying a token. +type Token struct { + Raw string // The raw token. Populated when you Parse a token + Method SigningMethod // The signing method used or to be used + Header map[string]interface{} // The first segment of the token + Claims Claims // The second segment of the token + Signature string // The third segment of the token. Populated when you Parse a token + Valid bool // Is the token valid? Populated when you Parse/Verify a token +} + +// Create a new Token. Takes a signing method +func New(method SigningMethod) *Token { + return NewWithClaims(method, MapClaims{}) +} + +func NewWithClaims(method SigningMethod, claims Claims) *Token { + return &Token{ + Header: map[string]interface{}{ + "typ": "JWT", + "alg": method.Alg(), + }, + Claims: claims, + Method: method, + } +} + +// Get the complete, signed token +func (t *Token) SignedString(key interface{}) (string, error) { + var sig, sstr string + var err error + if sstr, err = t.SigningString(); err != nil { + return "", err + } + if sig, err = t.Method.Sign(sstr, key); err != nil { + return "", err + } + return strings.Join([]string{sstr, sig}, "."), nil +} + +// Generate the signing string. This is the +// most expensive part of the whole deal. Unless you +// need this for something special, just go straight for +// the SignedString. +func (t *Token) SigningString() (string, error) { + var err error + parts := make([]string, 2) + for i, _ := range parts { + var jsonValue []byte + if i == 0 { + if jsonValue, err = json.Marshal(t.Header); err != nil { + return "", err + } + } else { + if jsonValue, err = json.Marshal(t.Claims); err != nil { + return "", err + } + } + + parts[i] = EncodeSegment(jsonValue) + } + return strings.Join(parts, "."), nil +} + +// Parse, validate, and return a token. +// keyFunc will receive the parsed token and should return the key for validating. 
+// If everything is kosher, err will be nil +func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + return new(Parser).Parse(tokenString, keyFunc) +} + +func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { + return new(Parser).ParseWithClaims(tokenString, claims, keyFunc) +} + +// Encode JWT specific base64url encoding with padding stripped +func EncodeSegment(seg []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=") +} + +// Decode JWT specific base64url encoding with padding stripped +func DecodeSegment(seg string) ([]byte, error) { + if l := len(seg) % 4; l > 0 { + seg += strings.Repeat("=", 4-l) + } + + return base64.URLEncoding.DecodeString(seg) +} From 3da015f8aaa8653690917031108bd45a2bed5cfa Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Mon, 24 Apr 2017 15:14:12 -0700 Subject: [PATCH 028/276] reference: allow more than 1 digest algorithm separator This updates the grammar to allow more than one digest algorithm separator, matching the regular expression and intended grammar. Signed-off-by: Stephen J Day --- reference/reference.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/reference/reference.go b/reference/reference.go index fd3510e9..2f66cca8 100644 --- a/reference/reference.go +++ b/reference/reference.go @@ -15,7 +15,7 @@ // tag := /[\w][\w.-]{0,127}/ // // digest := digest-algorithm ":" digest-hex -// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ] +// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]* // digest-algorithm-separator := /[+.-_]/ // digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ // digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value From 7f510ae9c9d15b42c621b6ccb6ef90a6ca94c12b Mon Sep 17 00:00:00 2001 From: Troels Thomsen Date: Mon, 27 Mar 2017 22:04:00 +0200 Subject: [PATCH 029/276] Support session token Signed-off-by: Troels Thomsen --- registry/storage/driver/s3-aws/s3.go | 5 +++++ registry/storage/driver/s3-aws/s3_test.go | 2 ++ registry/storage/driver/s3-aws/s3_v2_signer.go | 3 +++ 3 files changed, 10 insertions(+) diff --git a/registry/storage/driver/s3-aws/s3.go b/registry/storage/driver/s3-aws/s3.go index 469e3997..19407d80 100644 --- a/registry/storage/driver/s3-aws/s3.go +++ b/registry/storage/driver/s3-aws/s3.go @@ -98,6 +98,7 @@ type DriverParameters struct { StorageClass string UserAgent string ObjectACL string + SessionToken string } func init() { @@ -331,6 +332,8 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { objectACL = objectACLString } + sessionToken := "" + params := DriverParameters{ fmt.Sprint(accessKey), fmt.Sprint(secretKey), @@ -349,6 +352,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { storageClass, fmt.Sprint(userAgent), objectACL, + fmt.Sprint(sessionToken), } return New(params) @@ -398,6 +402,7 @@ func New(params DriverParameters) (*Driver, error) { Value: credentials.Value{ AccessKeyID: params.AccessKey, SecretAccessKey: params.SecretKey, + SessionToken: params.SessionToken, }, }, &credentials.EnvProvider{}, diff --git a/registry/storage/driver/s3-aws/s3_test.go b/registry/storage/driver/s3-aws/s3_test.go index eb7ee519..363a22eb 100644 --- a/registry/storage/driver/s3-aws/s3_test.go +++ b/registry/storage/driver/s3-aws/s3_test.go @@ -36,6 +36,7 @@ func init() { objectACL := os.Getenv("S3_OBJECT_ACL") root, err := ioutil.TempDir("", 
"driver-") regionEndpoint := os.Getenv("REGION_ENDPOINT") + sessionToken := os.Getenv("AWS_SESSION_TOKEN") if err != nil { panic(err) } @@ -84,6 +85,7 @@ func init() { storageClass, driverName + "-test", objectACL, + sessionToken, } return New(parameters) diff --git a/registry/storage/driver/s3-aws/s3_v2_signer.go b/registry/storage/driver/s3-aws/s3_v2_signer.go index 7cabe07e..cb801087 100644 --- a/registry/storage/driver/s3-aws/s3_v2_signer.go +++ b/registry/storage/driver/s3-aws/s3_v2_signer.go @@ -137,6 +137,9 @@ func (v2 *signer) Sign() error { host, canonicalPath := parsedURL.Host, parsedURL.Path v2.Request.Header["Host"] = []string{host} v2.Request.Header["date"] = []string{v2.Time.In(time.UTC).Format(time.RFC1123)} + if credValue.SessionToken != "" { + v2.Request.Header["x-amz-security-token"] = []string{credValue.SessionToken} + } smap = make(map[string]string) for k, v := range headers { From 1935c8d50b6b9d3fa8a5fcdb860cd3241a35e8df Mon Sep 17 00:00:00 2001 From: Troels Thomsen Date: Thu, 27 Apr 2017 14:57:47 +0200 Subject: [PATCH 030/276] Return early to prevent nil pointer dereference Signed-off-by: Troels Thomsen --- registry/handlers/app.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/registry/handlers/app.go b/registry/handlers/app.go index 5393e88d..5b4c9f37 100644 --- a/registry/handlers/app.go +++ b/registry/handlers/app.go @@ -859,8 +859,11 @@ func (app *App) eventBridge(ctx *Context, r *http.Request) notifications.Listene // nameRequired returns true if the route requires a name. func (app *App) nameRequired(r *http.Request) bool { route := mux.CurrentRoute(r) + if route == nil { + return true + } routeName := route.GetName() - return route == nil || (routeName != v2.RouteNameBase && routeName != v2.RouteNameCatalog) + return routeName != v2.RouteNameBase && routeName != v2.RouteNameCatalog } // apiBase implements a simple yes-man for doing overall checks against the From 05ac637aec086bc0983a5d25d3f1ad0816203b16 Mon Sep 17 00:00:00 2001 From: Masataka Mizukoshi Date: Mon, 1 May 2017 09:28:32 +0000 Subject: [PATCH 031/276] DOC:configuration.md: Change description of interval. Signed-off-by: Masataka Mizukoshi --- docs/configuration.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/configuration.md b/docs/configuration.md index 805328ea..4dfb9a9b 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -223,9 +223,9 @@ notifications: disabled: false url: https://my.listener.com/event headers: - timeout: 500 + timeout: 10s threshold: 5 - backoff: 1000 + backoff: 20s ignoredmediatypes: - application/octet-stream redis: @@ -816,9 +816,9 @@ notifications: disabled: false url: https://my.listener.com/event headers: - timeout: 500 + timeout: 10s threshold: 5 - backoff: 1000 + backoff: 20s ignoredmediatypes: - application/octet-stream ``` @@ -947,7 +947,7 @@ a file. | Parameter | Required | Description | |-----------|----------|-------------------------------------------------------| | `file` | yes | The path to check for existence of a file. | -| `interval`| no | How long to wait before repeating the check. A positive integer and an optional suffix indicating the unit of time. The suffix is one of `ns`, `us`, `ms`, `s`, `m`, or `h`. Defaults to `10s` if the value is omitted. | +| `interval`| no | How long to wait before repeating the check. A positive integer and an optional suffix indicating the unit of time. The suffix is one of `ns`, `us`, `ms`, `s`, `m`, or `h`. Defaults to `10s` if the value is omitted. 
If you specify a value but omit the suffix, the value is interpreted as a number of nanoseconds. | ### `http` @@ -1109,7 +1109,7 @@ middleware: baseurl: http://d111111abcdef8.cloudfront.net privatekey: /path/to/asecret.pem keypairid: asecret - duration: 60 + duration: 60s ``` See the configuration reference for [Cloudfront](#cloudfront) for more From 212f47c3186174e209734bd6e7162749fe4583b3 Mon Sep 17 00:00:00 2001 From: Luis Lobo Borobia Date: Wed, 3 May 2017 23:07:31 -0500 Subject: [PATCH 032/276] Fixed #htpasswd link Fixed #htpasswd link Signed-off-by: Luis Lobo Borobia --- docs/configuration.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/configuration.md b/docs/configuration.md index 805328ea..26fbf36a 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -552,7 +552,7 @@ The `auth` option is **optional**. Possible auth providers include: - [`silly`](#silly) - [`token`](#token) -- [`htpasswd`](#token) +- [`htpasswd`](#htpasswd) You can configure only one authentication provider. From 7d8dab5fdc23c21546430d4f723e3c540d16b3f7 Mon Sep 17 00:00:00 2001 From: Masataka Mizukoshi Date: Thu, 4 May 2017 19:14:29 +0000 Subject: [PATCH 033/276] DOC:configuration.md: Change description of interval. Signed-off-by: Masataka Mizukoshi --- docs/configuration.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/configuration.md b/docs/configuration.md index 4dfb9a9b..995fe5dc 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -223,9 +223,9 @@ notifications: disabled: false url: https://my.listener.com/event headers: - timeout: 10s - threshold: 5 - backoff: 20s + timeout: 1s + threshold: 10 + backoff: 1s ignoredmediatypes: - application/octet-stream redis: @@ -816,9 +816,9 @@ notifications: disabled: false url: https://my.listener.com/event headers: - timeout: 10s - threshold: 5 - backoff: 20s + timeout: 1s + threshold: 10 + backoff: 1s ignoredmediatypes: - application/octet-stream ``` From 37ca688dc079bbaee919cc82a74c288219a4a009 Mon Sep 17 00:00:00 2001 From: william wei <1342247033@qq.com> Date: Tue, 16 May 2017 17:47:13 +0800 Subject: [PATCH 034/276] Remove unused function Signed-off-by: william wei <1342247033@qq.com> --- registry/storage/driver/s3-goamz/s3.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/registry/storage/driver/s3-goamz/s3.go b/registry/storage/driver/s3-goamz/s3.go index e64ee879..b16ca49a 100644 --- a/registry/storage/driver/s3-goamz/s3.go +++ b/registry/storage/driver/s3-goamz/s3.go @@ -555,11 +555,6 @@ func parseError(path string, err error) error { return err } -func hasCode(err error, code string) bool { - s3err, ok := err.(*aws.Error) - return ok && s3err.Code == code -} - func (d *driver) getOptions() s3.Options { return s3.Options{ SSE: d.Encrypt, From df1e488526d3ddb339e7049df5a6c757c1009979 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Thu, 2 Mar 2017 16:20:05 -0800 Subject: [PATCH 035/276] Update registry build to use go 1.8 Signed-off-by: Derek McGowan (github: dmcgowan) --- Dockerfile | 2 +- circle.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 426954a1..b0d06f12 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.7-alpine +FROM golang:1.8-alpine ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution ENV DOCKER_BUILDTAGS include_oss include_gcs diff --git a/circle.yml b/circle.yml index 99a639c6..ddc76c86 100644 --- a/circle.yml +++ b/circle.yml @@ -8,7 +8,7 @@ machine: post: # go - - gvm 
install go1.7 --prefer-binary --name=stable + - gvm install go1.8 --prefer-binary --name=stable environment: # Convenient shortcuts to "common" locations From f01bcc8f6290ea35c5ad6c7e84491c572c8cac12 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Thu, 2 Mar 2017 13:39:41 -0800 Subject: [PATCH 036/276] vendor: update resumable dependency Updates resumable hash implementation to Go 1.8 equivalent. This should be a major speedup, since it includes a number of optimizations from Go 1.7. Signed-off-by: Stephen J Day --- vendor.conf | 2 +- .../stevvooe/resumable/resumable.go | 10 +- .../stevvooe/resumable/sha256/resume.go | 22 +- .../stevvooe/resumable/sha256/sha256.go | 2 +- .../stevvooe/resumable/sha256/sha256block.go | 4 +- .../resumable/sha256/sha256block_386.s | 4 +- .../resumable/sha256/sha256block_amd64.s | 870 +++++++++++++++++- .../resumable/sha256/sha256block_decl.go | 4 +- .../resumable/sha256/sha256block_generic.go | 9 + .../resumable/sha256/sha256block_s390x.go | 12 + .../resumable/sha256/sha256block_s390x.s | 34 + .../stevvooe/resumable/sha512/resume.go | 14 +- .../stevvooe/resumable/sha512/sha512.go | 164 +++- .../stevvooe/resumable/sha512/sha512block.go | 4 +- .../resumable/sha512/sha512block_amd64.s | 2 +- .../resumable/sha512/sha512block_decl.go | 4 +- .../resumable/sha512/sha512block_generic.go | 9 + .../resumable/sha512/sha512block_s390x.go | 12 + .../resumable/sha512/sha512block_s390x.s | 34 + 19 files changed, 1118 insertions(+), 98 deletions(-) create mode 100644 vendor/github.com/stevvooe/resumable/sha256/sha256block_generic.go create mode 100644 vendor/github.com/stevvooe/resumable/sha256/sha256block_s390x.go create mode 100644 vendor/github.com/stevvooe/resumable/sha256/sha256block_s390x.s create mode 100644 vendor/github.com/stevvooe/resumable/sha512/sha512block_generic.go create mode 100644 vendor/github.com/stevvooe/resumable/sha512/sha512block_s390x.go create mode 100644 vendor/github.com/stevvooe/resumable/sha512/sha512block_s390x.s diff --git a/vendor.conf b/vendor.conf index 5b12dc49..2af46111 100644 --- a/vendor.conf +++ b/vendor.conf @@ -23,7 +23,7 @@ github.com/mitchellh/mapstructure 482a9fd5fa83e8c4e7817413b80f3eb8feec03ef github.com/ncw/swift b964f2ca856aac39885e258ad25aec08d5f64ee6 github.com/spf13/cobra 312092086bed4968099259622145a0c9ae280064 github.com/spf13/pflag 5644820622454e71517561946e3d94b9f9db6842 -github.com/stevvooe/resumable 51ad44105773cafcbe91927f70ac68e1bf78f8b4 +github.com/stevvooe/resumable 2aaf90b2ceea5072cb503ef2a620b08ff3119870 github.com/xenolf/lego a9d8cec0e6563575e5868a005359ac97911b5985 github.com/yvasiyarov/go-metrics 57bccd1ccd43f94bb17fdd8bf3007059b802f85e github.com/yvasiyarov/gorelic a9bba5b9ab508a086f9a12b8c51fab68478e2128 diff --git a/vendor/github.com/stevvooe/resumable/resumable.go b/vendor/github.com/stevvooe/resumable/resumable.go index af4488f1..62ec970a 100644 --- a/vendor/github.com/stevvooe/resumable/resumable.go +++ b/vendor/github.com/stevvooe/resumable/resumable.go @@ -26,7 +26,15 @@ // functions. package resumable -import "hash" +import ( + "fmt" + "hash" +) + +var ( + // ErrBadState is returned if Restore fails post-unmarshaling validation. + ErrBadState = fmt.Errorf("bad hash state") +) // Hash is the common interface implemented by all resumable hash functions. 
type Hash interface { diff --git a/vendor/github.com/stevvooe/resumable/sha256/resume.go b/vendor/github.com/stevvooe/resumable/sha256/resume.go index 426d78ad..4e1cf4e6 100644 --- a/vendor/github.com/stevvooe/resumable/sha256/resume.go +++ b/vendor/github.com/stevvooe/resumable/sha256/resume.go @@ -2,8 +2,10 @@ package sha256 import ( "bytes" + "crypto" "encoding/gob" + "github.com/stevvooe/resumable" // import to ensure that our init function runs after the standard package _ "crypto/sha256" ) @@ -18,10 +20,15 @@ func (d *digest) State() ([]byte, error) { var buf bytes.Buffer encoder := gob.NewEncoder(&buf) + function := crypto.SHA256 + if d.is224 { + function = crypto.SHA224 + } + // We encode this way so that we do not have // to export these fields of the digest struct. vals := []interface{}{ - d.h, d.x, d.nx, d.len, d.is224, + d.h, d.x, d.nx, d.len, function, } for _, val := range vals { @@ -37,10 +44,12 @@ func (d *digest) State() ([]byte, error) { func (d *digest) Restore(state []byte) error { decoder := gob.NewDecoder(bytes.NewReader(state)) + var function uint + // We decode this way so that we do not have // to export these fields of the digest struct. vals := []interface{}{ - &d.h, &d.x, &d.nx, &d.len, &d.is224, + &d.h, &d.x, &d.nx, &d.len, &function, } for _, val := range vals { @@ -49,5 +58,14 @@ func (d *digest) Restore(state []byte) error { } } + switch crypto.Hash(function) { + case crypto.SHA224: + d.is224 = true + case crypto.SHA256: + d.is224 = false + default: + return resumable.ErrBadState + } + return nil } diff --git a/vendor/github.com/stevvooe/resumable/sha256/sha256.go b/vendor/github.com/stevvooe/resumable/sha256/sha256.go index d84cebf2..74b05b92 100644 --- a/vendor/github.com/stevvooe/resumable/sha256/sha256.go +++ b/vendor/github.com/stevvooe/resumable/sha256/sha256.go @@ -137,7 +137,7 @@ func (d0 *digest) Sum(in []byte) []byte { func (d *digest) checkSum() [Size]byte { len := d.len - // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64. + // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64. var tmp [64]byte tmp[0] = 0x80 if len%64 < 56 { diff --git a/vendor/github.com/stevvooe/resumable/sha256/sha256block.go b/vendor/github.com/stevvooe/resumable/sha256/sha256block.go index ca5efd15..d43bbf02 100644 --- a/vendor/github.com/stevvooe/resumable/sha256/sha256block.go +++ b/vendor/github.com/stevvooe/resumable/sha256/sha256block.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !386,!amd64 - // SHA256 block step. // In its own file so that a faster assembly or C version // can be substituted easily. @@ -77,7 +75,7 @@ var _K = []uint32{ 0xc67178f2, } -func block(dig *digest, p []byte) { +func blockGeneric(dig *digest, p []byte) { var w [64]uint32 h0, h1, h2, h3, h4, h5, h6, h7 := dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] for len(p) >= chunk { diff --git a/vendor/github.com/stevvooe/resumable/sha256/sha256block_386.s b/vendor/github.com/stevvooe/resumable/sha256/sha256block_386.s index 73ae2bf3..33ed027e 100644 --- a/vendor/github.com/stevvooe/resumable/sha256/sha256block_386.s +++ b/vendor/github.com/stevvooe/resumable/sha256/sha256block_386.s @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
@@ -141,7 +141,7 @@ MSGSCHEDULE1(index); \ SHA256ROUND(index, const, a, b, c, d, e, f, g, h) -TEXT ·block(SB),0,$296-12 +TEXT ·block(SB),0,$296-16 MOVL p_base+4(FP), SI MOVL p_len+8(FP), DX SHRL $6, DX diff --git a/vendor/github.com/stevvooe/resumable/sha256/sha256block_amd64.s b/vendor/github.com/stevvooe/resumable/sha256/sha256block_amd64.s index 868eaed4..e9705b94 100644 --- a/vendor/github.com/stevvooe/resumable/sha256/sha256block_amd64.s +++ b/vendor/github.com/stevvooe/resumable/sha256/sha256block_amd64.s @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -9,7 +9,18 @@ // The algorithm is detailed in FIPS 180-4: // // http://csrc.nist.gov/publications/fips/fips180-4/fips-180-4.pdf -// + +// The avx2-version is described in an Intel White-Paper: +// "Fast SHA-256 Implementations on Intel Architecture Processors" +// To find it, surf to http://www.intel.com/p/en_US/embedded +// and search for that title. +// AVX2 version by Intel, same algorithm as code in Linux kernel: +// https://github.com/torvalds/linux/blob/master/arch/x86/crypto/sha256-avx2-asm.S +// by +// James Guilford +// Kirk Yap +// Tim Chen + // Wt = Mt; for 0 <= t <= 15 // Wt = SIGMA1(Wt-2) + SIGMA0(Wt-15) + Wt-16; for 16 <= t <= 63 // @@ -140,29 +151,442 @@ MSGSCHEDULE1(index); \ SHA256ROUND(index, const, a, b, c, d, e, f, g, h) -TEXT ·block(SB),0,$264-32 - MOVQ p_base+8(FP), SI - MOVQ p_len+16(FP), DX - SHRQ $6, DX - SHLQ $6, DX - LEAQ (SI)(DX*1), DI - MOVQ DI, 256(SP) - CMPQ SI, DI - JEQ end +// Definitions for AVX2 version - MOVQ dig+0(FP), BP - MOVL (0*4)(BP), R8 // a = H0 - MOVL (1*4)(BP), R9 // b = H1 - MOVL (2*4)(BP), R10 // c = H2 - MOVL (3*4)(BP), R11 // d = H3 - MOVL (4*4)(BP), R12 // e = H4 - MOVL (5*4)(BP), R13 // f = H5 - MOVL (6*4)(BP), R14 // g = H6 - MOVL (7*4)(BP), R15 // h = H7 +// addm (mem), reg +// Add reg to mem using reg-mem add and store +#define addm(P1, P2) \ + ADDL P2, P1; \ + MOVL P1, P2 + +#define XDWORD0 Y4 +#define XDWORD1 Y5 +#define XDWORD2 Y6 +#define XDWORD3 Y7 + +#define XWORD0 X4 +#define XWORD1 X5 +#define XWORD2 X6 +#define XWORD3 X7 + +#define XTMP0 Y0 +#define XTMP1 Y1 +#define XTMP2 Y2 +#define XTMP3 Y3 +#define XTMP4 Y8 +#define XTMP5 Y11 + +#define XFER Y9 + +#define BYTE_FLIP_MASK Y13 // mask to convert LE -> BE +#define X_BYTE_FLIP_MASK X13 + +#define NUM_BYTES DX +#define INP DI + +#define CTX SI // Beginning of digest in memory (a, b, c, ... 
, h) + +#define a AX +#define b BX +#define c CX +#define d R8 +#define e DX +#define f R9 +#define g R10 +#define h R11 + +#define old_h R11 + +#define TBL BP + +#define SRND SI // SRND is same register as CTX + +#define T1 R12 + +#define y0 R13 +#define y1 R14 +#define y2 R15 +#define y3 DI + +// Offsets +#define XFER_SIZE 2*64*4 +#define INP_END_SIZE 8 +#define INP_SIZE 8 +#define TMP_SIZE 4 + +#define _XFER 0 +#define _INP_END _XFER + XFER_SIZE +#define _INP _INP_END + INP_END_SIZE +#define _TMP _INP + INP_SIZE +#define STACK_SIZE _TMP + TMP_SIZE + +#define ROUND_AND_SCHED_N_0(disp, a, b, c, d, e, f, g, h, XDWORD0, XDWORD1, XDWORD2, XDWORD3) \ + ; \ // ############################# RND N + 0 ############################// + MOVL a, y3; \ // y3 = a // MAJA + RORXL $25, e, y0; \ // y0 = e >> 25 // S1A + RORXL $11, e, y1; \ // y1 = e >> 11 // S1B + ; \ + ADDL (disp + 0*4)(SP)(SRND*1), h; \ // h = k + w + h // disp = k + w + ORL c, y3; \ // y3 = a|c // MAJA + VPALIGNR $4, XDWORD2, XDWORD3, XTMP0; \ // XTMP0 = W[-7] + MOVL f, y2; \ // y2 = f // CH + RORXL $13, a, T1; \ // T1 = a >> 13 // S0B + ; \ + XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) // S1 + XORL g, y2; \ // y2 = f^g // CH + VPADDD XDWORD0, XTMP0, XTMP0; \ // XTMP0 = W[-7] + W[-16] // y1 = (e >> 6) // S1 + RORXL $6, e, y1; \ // y1 = (e >> 6) // S1 + ; \ + ANDL e, y2; \ // y2 = (f^g)&e // CH + XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) ^ (e>>6) // S1 + RORXL $22, a, y1; \ // y1 = a >> 22 // S0A + ADDL h, d; \ // d = k + w + h + d // -- + ; \ + ANDL b, y3; \ // y3 = (a|c)&b // MAJA + VPALIGNR $4, XDWORD0, XDWORD1, XTMP1; \ // XTMP1 = W[-15] + XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) // S0 + RORXL $2, a, T1; \ // T1 = (a >> 2) // S0 + ; \ + XORL g, y2; \ // y2 = CH = ((f^g)&e)^g // CH + VPSRLD $7, XTMP1, XTMP2; \ + XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) ^ (a>>2) // S0 + MOVL a, T1; \ // T1 = a // MAJB + ANDL c, T1; \ // T1 = a&c // MAJB + ; \ + ADDL y0, y2; \ // y2 = S1 + CH // -- + VPSLLD $(32-7), XTMP1, XTMP3; \ + ORL T1, y3; \ // y3 = MAJ = (a|c)&b)|(a&c) // MAJ + ADDL y1, h; \ // h = k + w + h + S0 // -- + ; \ + ADDL y2, d; \ // d = k + w + h + d + S1 + CH = d + t1 // -- + VPOR XTMP2, XTMP3, XTMP3; \ // XTMP3 = W[-15] ror 7 + ; \ + VPSRLD $18, XTMP1, XTMP2; \ + ADDL y2, h; \ // h = k + w + h + S0 + S1 + CH = t1 + S0// -- + ADDL y3, h // h = t1 + S0 + MAJ // -- + +#define ROUND_AND_SCHED_N_1(disp, a, b, c, d, e, f, g, h, XDWORD0, XDWORD1, XDWORD2, XDWORD3) \ + ; \ // ################################### RND N + 1 ############################ + ; \ + MOVL a, y3; \ // y3 = a // MAJA + RORXL $25, e, y0; \ // y0 = e >> 25 // S1A + RORXL $11, e, y1; \ // y1 = e >> 11 // S1B + ADDL (disp + 1*4)(SP)(SRND*1), h; \ // h = k + w + h // -- + ORL c, y3; \ // y3 = a|c // MAJA + ; \ + VPSRLD $3, XTMP1, XTMP4; \ // XTMP4 = W[-15] >> 3 + MOVL f, y2; \ // y2 = f // CH + RORXL $13, a, T1; \ // T1 = a >> 13 // S0B + XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) // S1 + XORL g, y2; \ // y2 = f^g // CH + ; \ + RORXL $6, e, y1; \ // y1 = (e >> 6) // S1 + XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) ^ (e>>6) // S1 + RORXL $22, a, y1; \ // y1 = a >> 22 // S0A + ANDL e, y2; \ // y2 = (f^g)&e // CH + ADDL h, d; \ // d = k + w + h + d // -- + ; \ + VPSLLD $(32-18), XTMP1, XTMP1; \ + ANDL b, y3; \ // y3 = (a|c)&b // MAJA + XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) // S0 + ; \ + VPXOR XTMP1, XTMP3, XTMP3; \ + RORXL $2, a, T1; \ // T1 = (a >> 2) // S0 + XORL g, y2; \ // y2 = CH = ((f^g)&e)^g // CH + ; \ + VPXOR XTMP2, XTMP3, XTMP3; \ // XTMP3 = W[-15] ror 7 ^ W[-15] ror 18 + 
XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) ^ (a>>2) // S0 + MOVL a, T1; \ // T1 = a // MAJB + ANDL c, T1; \ // T1 = a&c // MAJB + ADDL y0, y2; \ // y2 = S1 + CH // -- + ; \ + VPXOR XTMP4, XTMP3, XTMP1; \ // XTMP1 = s0 + VPSHUFD $0xFA, XDWORD3, XTMP2; \ // XTMP2 = W[-2] {BBAA} + ORL T1, y3; \ // y3 = MAJ = (a|c)&b)|(a&c) // MAJ + ADDL y1, h; \ // h = k + w + h + S0 // -- + ; \ + VPADDD XTMP1, XTMP0, XTMP0; \ // XTMP0 = W[-16] + W[-7] + s0 + ADDL y2, d; \ // d = k + w + h + d + S1 + CH = d + t1 // -- + ADDL y2, h; \ // h = k + w + h + S0 + S1 + CH = t1 + S0// -- + ADDL y3, h; \ // h = t1 + S0 + MAJ // -- + ; \ + VPSRLD $10, XTMP2, XTMP4 // XTMP4 = W[-2] >> 10 {BBAA} + +#define ROUND_AND_SCHED_N_2(disp, a, b, c, d, e, f, g, h, XDWORD0, XDWORD1, XDWORD2, XDWORD3) \ + ; \ // ################################### RND N + 2 ############################ + ; \ + MOVL a, y3; \ // y3 = a // MAJA + RORXL $25, e, y0; \ // y0 = e >> 25 // S1A + ADDL (disp + 2*4)(SP)(SRND*1), h; \ // h = k + w + h // -- + ; \ + VPSRLQ $19, XTMP2, XTMP3; \ // XTMP3 = W[-2] ror 19 {xBxA} + RORXL $11, e, y1; \ // y1 = e >> 11 // S1B + ORL c, y3; \ // y3 = a|c // MAJA + MOVL f, y2; \ // y2 = f // CH + XORL g, y2; \ // y2 = f^g // CH + ; \ + RORXL $13, a, T1; \ // T1 = a >> 13 // S0B + XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) // S1 + VPSRLQ $17, XTMP2, XTMP2; \ // XTMP2 = W[-2] ror 17 {xBxA} + ANDL e, y2; \ // y2 = (f^g)&e // CH + ; \ + RORXL $6, e, y1; \ // y1 = (e >> 6) // S1 + VPXOR XTMP3, XTMP2, XTMP2; \ + ADDL h, d; \ // d = k + w + h + d // -- + ANDL b, y3; \ // y3 = (a|c)&b // MAJA + ; \ + XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) ^ (e>>6) // S1 + RORXL $22, a, y1; \ // y1 = a >> 22 // S0A + VPXOR XTMP2, XTMP4, XTMP4; \ // XTMP4 = s1 {xBxA} + XORL g, y2; \ // y2 = CH = ((f^g)&e)^g // CH + ; \ + MOVL f, _TMP(SP); \ + MOVQ $shuff_00BA<>(SB), f; \ // f is used to keep SHUF_00BA + VPSHUFB (f), XTMP4, XTMP4; \ // XTMP4 = s1 {00BA} + MOVL _TMP(SP), f; \ // f is restored + ; \ + XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) // S0 + RORXL $2, a, T1; \ // T1 = (a >> 2) // S0 + VPADDD XTMP4, XTMP0, XTMP0; \ // XTMP0 = {..., ..., W[1], W[0]} + ; \ + XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) ^ (a>>2) // S0 + MOVL a, T1; \ // T1 = a // MAJB + ANDL c, T1; \ // T1 = a&c // MAJB + ADDL y0, y2; \ // y2 = S1 + CH // -- + VPSHUFD $80, XTMP0, XTMP2; \ // XTMP2 = W[-2] {DDCC} + ; \ + ORL T1, y3; \ // y3 = MAJ = (a|c)&b)|(a&c) // MAJ + ADDL y1, h; \ // h = k + w + h + S0 // -- + ADDL y2, d; \ // d = k + w + h + d + S1 + CH = d + t1 // -- + ADDL y2, h; \ // h = k + w + h + S0 + S1 + CH = t1 + S0// -- + ; \ + ADDL y3, h // h = t1 + S0 + MAJ // -- + +#define ROUND_AND_SCHED_N_3(disp, a, b, c, d, e, f, g, h, XDWORD0, XDWORD1, XDWORD2, XDWORD3) \ + ; \ // ################################### RND N + 3 ############################ + ; \ + MOVL a, y3; \ // y3 = a // MAJA + RORXL $25, e, y0; \ // y0 = e >> 25 // S1A + RORXL $11, e, y1; \ // y1 = e >> 11 // S1B + ADDL (disp + 3*4)(SP)(SRND*1), h; \ // h = k + w + h // -- + ORL c, y3; \ // y3 = a|c // MAJA + ; \ + VPSRLD $10, XTMP2, XTMP5; \ // XTMP5 = W[-2] >> 10 {DDCC} + MOVL f, y2; \ // y2 = f // CH + RORXL $13, a, T1; \ // T1 = a >> 13 // S0B + XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) // S1 + XORL g, y2; \ // y2 = f^g // CH + ; \ + VPSRLQ $19, XTMP2, XTMP3; \ // XTMP3 = W[-2] ror 19 {xDxC} + RORXL $6, e, y1; \ // y1 = (e >> 6) // S1 + ANDL e, y2; \ // y2 = (f^g)&e // CH + ADDL h, d; \ // d = k + w + h + d // -- + ANDL b, y3; \ // y3 = (a|c)&b // MAJA + ; \ + VPSRLQ $17, XTMP2, XTMP2; \ // XTMP2 = W[-2] ror 
17 {xDxC} + XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) ^ (e>>6) // S1 + XORL g, y2; \ // y2 = CH = ((f^g)&e)^g // CH + ; \ + VPXOR XTMP3, XTMP2, XTMP2; \ + RORXL $22, a, y1; \ // y1 = a >> 22 // S0A + ADDL y0, y2; \ // y2 = S1 + CH // -- + ; \ + VPXOR XTMP2, XTMP5, XTMP5; \ // XTMP5 = s1 {xDxC} + XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) // S0 + ADDL y2, d; \ // d = k + w + h + d + S1 + CH = d + t1 // -- + ; \ + RORXL $2, a, T1; \ // T1 = (a >> 2) // S0 + ; \ + MOVL f, _TMP(SP); \ // Save f + MOVQ $shuff_DC00<>(SB), f; \ // SHUF_00DC + VPSHUFB (f), XTMP5, XTMP5; \ // XTMP5 = s1 {DC00} + MOVL _TMP(SP), f; \ // Restore f + ; \ + VPADDD XTMP0, XTMP5, XDWORD0; \ // XDWORD0 = {W[3], W[2], W[1], W[0]} + XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) ^ (a>>2) // S0 + MOVL a, T1; \ // T1 = a // MAJB + ANDL c, T1; \ // T1 = a&c // MAJB + ORL T1, y3; \ // y3 = MAJ = (a|c)&b)|(a&c) // MAJ + ; \ + ADDL y1, h; \ // h = k + w + h + S0 // -- + ADDL y2, h; \ // h = k + w + h + S0 + S1 + CH = t1 + S0// -- + ADDL y3, h // h = t1 + S0 + MAJ // -- + +#define DO_ROUND_N_0(disp, a, b, c, d, e, f, g, h, old_h) \ + ; \ // ################################### RND N + 0 ########################### + MOVL f, y2; \ // y2 = f // CH + RORXL $25, e, y0; \ // y0 = e >> 25 // S1A + RORXL $11, e, y1; \ // y1 = e >> 11 // S1B + XORL g, y2; \ // y2 = f^g // CH + ; \ + XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) // S1 + RORXL $6, e, y1; \ // y1 = (e >> 6) // S1 + ANDL e, y2; \ // y2 = (f^g)&e // CH + ; \ + XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) ^ (e>>6) // S1 + RORXL $13, a, T1; \ // T1 = a >> 13 // S0B + XORL g, y2; \ // y2 = CH = ((f^g)&e)^g // CH + RORXL $22, a, y1; \ // y1 = a >> 22 // S0A + MOVL a, y3; \ // y3 = a // MAJA + ; \ + XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) // S0 + RORXL $2, a, T1; \ // T1 = (a >> 2) // S0 + ADDL (disp + 0*4)(SP)(SRND*1), h; \ // h = k + w + h // -- + ORL c, y3; \ // y3 = a|c // MAJA + ; \ + XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) ^ (a>>2) // S0 + MOVL a, T1; \ // T1 = a // MAJB + ANDL b, y3; \ // y3 = (a|c)&b // MAJA + ANDL c, T1; \ // T1 = a&c // MAJB + ADDL y0, y2; \ // y2 = S1 + CH // -- + ; \ + ADDL h, d; \ // d = k + w + h + d // -- + ORL T1, y3; \ // y3 = MAJ = (a|c)&b)|(a&c) // MAJ + ADDL y1, h; \ // h = k + w + h + S0 // -- + ADDL y2, d // d = k + w + h + d + S1 + CH = d + t1 // -- + +#define DO_ROUND_N_1(disp, a, b, c, d, e, f, g, h, old_h) \ + ; \ // ################################### RND N + 1 ########################### + ADDL y2, old_h; \ // h = k + w + h + S0 + S1 + CH = t1 + S0 // -- + MOVL f, y2; \ // y2 = f // CH + RORXL $25, e, y0; \ // y0 = e >> 25 // S1A + RORXL $11, e, y1; \ // y1 = e >> 11 // S1B + XORL g, y2; \ // y2 = f^g // CH + ; \ + XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) // S1 + RORXL $6, e, y1; \ // y1 = (e >> 6) // S1 + ANDL e, y2; \ // y2 = (f^g)&e // CH + ADDL y3, old_h; \ // h = t1 + S0 + MAJ // -- + ; \ + XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) ^ (e>>6) // S1 + RORXL $13, a, T1; \ // T1 = a >> 13 // S0B + XORL g, y2; \ // y2 = CH = ((f^g)&e)^g // CH + RORXL $22, a, y1; \ // y1 = a >> 22 // S0A + MOVL a, y3; \ // y3 = a // MAJA + ; \ + XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) // S0 + RORXL $2, a, T1; \ // T1 = (a >> 2) // S0 + ADDL (disp + 1*4)(SP)(SRND*1), h; \ // h = k + w + h // -- + ORL c, y3; \ // y3 = a|c // MAJA + ; \ + XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) ^ (a>>2) // S0 + MOVL a, T1; \ // T1 = a // MAJB + ANDL b, y3; \ // y3 = (a|c)&b // MAJA + ANDL c, T1; \ // T1 = a&c // MAJB + ADDL y0, y2; \ // y2 = S1 + CH // -- + ; \ + ADDL h, d; \ // d = k + w + h 
+ d // -- + ORL T1, y3; \ // y3 = MAJ = (a|c)&b)|(a&c) // MAJ + ADDL y1, h; \ // h = k + w + h + S0 // -- + ; \ + ADDL y2, d // d = k + w + h + d + S1 + CH = d + t1 // -- + +#define DO_ROUND_N_2(disp, a, b, c, d, e, f, g, h, old_h) \ + ; \ // ################################### RND N + 2 ############################## + ADDL y2, old_h; \ // h = k + w + h + S0 + S1 + CH = t1 + S0// -- + MOVL f, y2; \ // y2 = f // CH + RORXL $25, e, y0; \ // y0 = e >> 25 // S1A + RORXL $11, e, y1; \ // y1 = e >> 11 // S1B + XORL g, y2; \ // y2 = f^g // CH + ; \ + XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) // S1 + RORXL $6, e, y1; \ // y1 = (e >> 6) // S1 + ANDL e, y2; \ // y2 = (f^g)&e // CH + ADDL y3, old_h; \ // h = t1 + S0 + MAJ // -- + ; \ + XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) ^ (e>>6) // S1 + RORXL $13, a, T1; \ // T1 = a >> 13 // S0B + XORL g, y2; \ // y2 = CH = ((f^g)&e)^g // CH + RORXL $22, a, y1; \ // y1 = a >> 22 // S0A + MOVL a, y3; \ // y3 = a // MAJA + ; \ + XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) // S0 + RORXL $2, a, T1; \ // T1 = (a >> 2) // S0 + ADDL (disp + 2*4)(SP)(SRND*1), h; \ // h = k + w + h // -- + ORL c, y3; \ // y3 = a|c // MAJA + ; \ + XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) ^ (a>>2) // S0 + MOVL a, T1; \ // T1 = a // MAJB + ANDL b, y3; \ // y3 = (a|c)&b // MAJA + ANDL c, T1; \ // T1 = a&c // MAJB + ADDL y0, y2; \ // y2 = S1 + CH // -- + ; \ + ADDL h, d; \ // d = k + w + h + d // -- + ORL T1, y3; \ // y3 = MAJ = (a|c)&b)|(a&c) // MAJ + ADDL y1, h; \ // h = k + w + h + S0 // -- + ; \ + ADDL y2, d // d = k + w + h + d + S1 + CH = d + t1 // -- + +#define DO_ROUND_N_3(disp, a, b, c, d, e, f, g, h, old_h) \ + ; \ // ################################### RND N + 3 ########################### + ADDL y2, old_h; \ // h = k + w + h + S0 + S1 + CH = t1 + S0// -- + MOVL f, y2; \ // y2 = f // CH + RORXL $25, e, y0; \ // y0 = e >> 25 // S1A + RORXL $11, e, y1; \ // y1 = e >> 11 // S1B + XORL g, y2; \ // y2 = f^g // CH + ; \ + XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) // S1 + RORXL $6, e, y1; \ // y1 = (e >> 6) // S1 + ANDL e, y2; \ // y2 = (f^g)&e // CH + ADDL y3, old_h; \ // h = t1 + S0 + MAJ // -- + ; \ + XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) ^ (e>>6) // S1 + RORXL $13, a, T1; \ // T1 = a >> 13 // S0B + XORL g, y2; \ // y2 = CH = ((f^g)&e)^g // CH + RORXL $22, a, y1; \ // y1 = a >> 22 // S0A + MOVL a, y3; \ // y3 = a // MAJA + ; \ + XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) // S0 + RORXL $2, a, T1; \ // T1 = (a >> 2) // S0 + ADDL (disp + 3*4)(SP)(SRND*1), h; \ // h = k + w + h // -- + ORL c, y3; \ // y3 = a|c // MAJA + ; \ + XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) ^ (a>>2) // S0 + MOVL a, T1; \ // T1 = a // MAJB + ANDL b, y3; \ // y3 = (a|c)&b // MAJA + ANDL c, T1; \ // T1 = a&c // MAJB + ADDL y0, y2; \ // y2 = S1 + CH // -- + ; \ + ADDL h, d; \ // d = k + w + h + d // -- + ORL T1, y3; \ // y3 = MAJ = (a|c)&b)|(a&c) // MAJ + ADDL y1, h; \ // h = k + w + h + S0 // -- + ; \ + ADDL y2, d; \ // d = k + w + h + d + S1 + CH = d + t1 // -- + ; \ + ADDL y2, h; \ // h = k + w + h + S0 + S1 + CH = t1 + S0// -- + ; \ + ADDL y3, h // h = t1 + S0 + MAJ // -- + +TEXT ·block(SB), 0, $536-32 + CMPB runtime·support_avx2(SB), $0 + JE noavx2bmi2 + CMPB runtime·support_bmi2(SB), $1 // check for RORXL instruction + JE avx2 +noavx2bmi2: + + MOVQ p_base+8(FP), SI + MOVQ p_len+16(FP), DX + SHRQ $6, DX + SHLQ $6, DX + + LEAQ (SI)(DX*1), DI + MOVQ DI, 256(SP) + CMPQ SI, DI + JEQ end + + MOVQ dig+0(FP), BP + MOVL (0*4)(BP), R8 // a = H0 + MOVL (1*4)(BP), R9 // b = H1 + MOVL (2*4)(BP), R10 // c = H2 + MOVL (3*4)(BP), 
R11 // d = H3 + MOVL (4*4)(BP), R12 // e = H4 + MOVL (5*4)(BP), R13 // f = H5 + MOVL (6*4)(BP), R14 // g = H6 + MOVL (7*4)(BP), R15 // h = H7 loop: - MOVQ SP, BP // message schedule + MOVQ SP, BP SHA256ROUND0(0, 0x428a2f98, R8, R9, R10, R11, R12, R13, R14, R15) SHA256ROUND0(1, 0x71374491, R15, R8, R9, R10, R11, R12, R13, R14) @@ -230,27 +654,391 @@ loop: SHA256ROUND1(62, 0xbef9a3f7, R10, R11, R12, R13, R14, R15, R8, R9) SHA256ROUND1(63, 0xc67178f2, R9, R10, R11, R12, R13, R14, R15, R8) - MOVQ dig+0(FP), BP - ADDL (0*4)(BP), R8 // H0 = a + H0 - MOVL R8, (0*4)(BP) - ADDL (1*4)(BP), R9 // H1 = b + H1 - MOVL R9, (1*4)(BP) - ADDL (2*4)(BP), R10 // H2 = c + H2 - MOVL R10, (2*4)(BP) - ADDL (3*4)(BP), R11 // H3 = d + H3 - MOVL R11, (3*4)(BP) - ADDL (4*4)(BP), R12 // H4 = e + H4 - MOVL R12, (4*4)(BP) - ADDL (5*4)(BP), R13 // H5 = f + H5 - MOVL R13, (5*4)(BP) - ADDL (6*4)(BP), R14 // H6 = g + H6 - MOVL R14, (6*4)(BP) - ADDL (7*4)(BP), R15 // H7 = h + H7 - MOVL R15, (7*4)(BP) + MOVQ dig+0(FP), BP + ADDL (0*4)(BP), R8 // H0 = a + H0 + MOVL R8, (0*4)(BP) + ADDL (1*4)(BP), R9 // H1 = b + H1 + MOVL R9, (1*4)(BP) + ADDL (2*4)(BP), R10 // H2 = c + H2 + MOVL R10, (2*4)(BP) + ADDL (3*4)(BP), R11 // H3 = d + H3 + MOVL R11, (3*4)(BP) + ADDL (4*4)(BP), R12 // H4 = e + H4 + MOVL R12, (4*4)(BP) + ADDL (5*4)(BP), R13 // H5 = f + H5 + MOVL R13, (5*4)(BP) + ADDL (6*4)(BP), R14 // H6 = g + H6 + MOVL R14, (6*4)(BP) + ADDL (7*4)(BP), R15 // H7 = h + H7 + MOVL R15, (7*4)(BP) - ADDQ $64, SI - CMPQ SI, 256(SP) - JB loop + ADDQ $64, SI + CMPQ SI, 256(SP) + JB loop end: RET + +avx2: + MOVQ dig+0(FP), CTX // d.h[8] + MOVQ p_base+8(FP), INP + MOVQ p_len+16(FP), NUM_BYTES + + LEAQ -64(INP)(NUM_BYTES*1), NUM_BYTES // Pointer to the last block + MOVQ NUM_BYTES, _INP_END(SP) + + CMPQ NUM_BYTES, INP + JE avx2_only_one_block + + // Load initial digest + MOVL 0(CTX), a // a = H0 + MOVL 4(CTX), b // b = H1 + MOVL 8(CTX), c // c = H2 + MOVL 12(CTX), d // d = H3 + MOVL 16(CTX), e // e = H4 + MOVL 20(CTX), f // f = H5 + MOVL 24(CTX), g // g = H6 + MOVL 28(CTX), h // h = H7 + +avx2_loop0: // at each iteration works with one block (512 bit) + + VMOVDQU (0*32)(INP), XTMP0 + VMOVDQU (1*32)(INP), XTMP1 + VMOVDQU (2*32)(INP), XTMP2 + VMOVDQU (3*32)(INP), XTMP3 + + MOVQ $flip_mask<>(SB), BP // BYTE_FLIP_MASK + VMOVDQU (BP), BYTE_FLIP_MASK + + // Apply Byte Flip Mask: LE -> BE + VPSHUFB BYTE_FLIP_MASK, XTMP0, XTMP0 + VPSHUFB BYTE_FLIP_MASK, XTMP1, XTMP1 + VPSHUFB BYTE_FLIP_MASK, XTMP2, XTMP2 + VPSHUFB BYTE_FLIP_MASK, XTMP3, XTMP3 + + // Transpose data into high/low parts + VPERM2I128 $0x20, XTMP2, XTMP0, XDWORD0 // w3, w2, w1, w0 + VPERM2I128 $0x31, XTMP2, XTMP0, XDWORD1 // w7, w6, w5, w4 + VPERM2I128 $0x20, XTMP3, XTMP1, XDWORD2 // w11, w10, w9, w8 + VPERM2I128 $0x31, XTMP3, XTMP1, XDWORD3 // w15, w14, w13, w12 + + MOVQ $K256<>(SB), TBL // Loading address of table with round-specific constants + +avx2_last_block_enter: + ADDQ $64, INP + MOVQ INP, _INP(SP) + XORQ SRND, SRND + +avx2_loop1: // for w0 - w47 + // Do 4 rounds and scheduling + VPADDD 0*32(TBL)(SRND*1), XDWORD0, XFER + VMOVDQU XFER, (_XFER + 0*32)(SP)(SRND*1) + ROUND_AND_SCHED_N_0(_XFER + 0*32, a, b, c, d, e, f, g, h, XDWORD0, XDWORD1, XDWORD2, XDWORD3) + ROUND_AND_SCHED_N_1(_XFER + 0*32, h, a, b, c, d, e, f, g, XDWORD0, XDWORD1, XDWORD2, XDWORD3) + ROUND_AND_SCHED_N_2(_XFER + 0*32, g, h, a, b, c, d, e, f, XDWORD0, XDWORD1, XDWORD2, XDWORD3) + ROUND_AND_SCHED_N_3(_XFER + 0*32, f, g, h, a, b, c, d, e, XDWORD0, XDWORD1, XDWORD2, XDWORD3) + + // Do 4 rounds and scheduling + VPADDD 
1*32(TBL)(SRND*1), XDWORD1, XFER + VMOVDQU XFER, (_XFER + 1*32)(SP)(SRND*1) + ROUND_AND_SCHED_N_0(_XFER + 1*32, e, f, g, h, a, b, c, d, XDWORD1, XDWORD2, XDWORD3, XDWORD0) + ROUND_AND_SCHED_N_1(_XFER + 1*32, d, e, f, g, h, a, b, c, XDWORD1, XDWORD2, XDWORD3, XDWORD0) + ROUND_AND_SCHED_N_2(_XFER + 1*32, c, d, e, f, g, h, a, b, XDWORD1, XDWORD2, XDWORD3, XDWORD0) + ROUND_AND_SCHED_N_3(_XFER + 1*32, b, c, d, e, f, g, h, a, XDWORD1, XDWORD2, XDWORD3, XDWORD0) + + // Do 4 rounds and scheduling + VPADDD 2*32(TBL)(SRND*1), XDWORD2, XFER + VMOVDQU XFER, (_XFER + 2*32)(SP)(SRND*1) + ROUND_AND_SCHED_N_0(_XFER + 2*32, a, b, c, d, e, f, g, h, XDWORD2, XDWORD3, XDWORD0, XDWORD1) + ROUND_AND_SCHED_N_1(_XFER + 2*32, h, a, b, c, d, e, f, g, XDWORD2, XDWORD3, XDWORD0, XDWORD1) + ROUND_AND_SCHED_N_2(_XFER + 2*32, g, h, a, b, c, d, e, f, XDWORD2, XDWORD3, XDWORD0, XDWORD1) + ROUND_AND_SCHED_N_3(_XFER + 2*32, f, g, h, a, b, c, d, e, XDWORD2, XDWORD3, XDWORD0, XDWORD1) + + // Do 4 rounds and scheduling + VPADDD 3*32(TBL)(SRND*1), XDWORD3, XFER + VMOVDQU XFER, (_XFER + 3*32)(SP)(SRND*1) + ROUND_AND_SCHED_N_0(_XFER + 3*32, e, f, g, h, a, b, c, d, XDWORD3, XDWORD0, XDWORD1, XDWORD2) + ROUND_AND_SCHED_N_1(_XFER + 3*32, d, e, f, g, h, a, b, c, XDWORD3, XDWORD0, XDWORD1, XDWORD2) + ROUND_AND_SCHED_N_2(_XFER + 3*32, c, d, e, f, g, h, a, b, XDWORD3, XDWORD0, XDWORD1, XDWORD2) + ROUND_AND_SCHED_N_3(_XFER + 3*32, b, c, d, e, f, g, h, a, XDWORD3, XDWORD0, XDWORD1, XDWORD2) + + ADDQ $4*32, SRND + CMPQ SRND, $3*4*32 + JB avx2_loop1 + +avx2_loop2: + // w48 - w63 processed with no scheduliung (last 16 rounds) + VPADDD 0*32(TBL)(SRND*1), XDWORD0, XFER + VMOVDQU XFER, (_XFER + 0*32)(SP)(SRND*1) + DO_ROUND_N_0(_XFER + 0*32, a, b, c, d, e, f, g, h, h) + DO_ROUND_N_1(_XFER + 0*32, h, a, b, c, d, e, f, g, h) + DO_ROUND_N_2(_XFER + 0*32, g, h, a, b, c, d, e, f, g) + DO_ROUND_N_3(_XFER + 0*32, f, g, h, a, b, c, d, e, f) + + VPADDD 1*32(TBL)(SRND*1), XDWORD1, XFER + VMOVDQU XFER, (_XFER + 1*32)(SP)(SRND*1) + DO_ROUND_N_0(_XFER + 1*32, e, f, g, h, a, b, c, d, e) + DO_ROUND_N_1(_XFER + 1*32, d, e, f, g, h, a, b, c, d) + DO_ROUND_N_2(_XFER + 1*32, c, d, e, f, g, h, a, b, c) + DO_ROUND_N_3(_XFER + 1*32, b, c, d, e, f, g, h, a, b) + + ADDQ $2*32, SRND + + VMOVDQU XDWORD2, XDWORD0 + VMOVDQU XDWORD3, XDWORD1 + + CMPQ SRND, $4*4*32 + JB avx2_loop2 + + MOVQ dig+0(FP), CTX // d.h[8] + MOVQ _INP(SP), INP + + addm( 0(CTX), a) + addm( 4(CTX), b) + addm( 8(CTX), c) + addm( 12(CTX), d) + addm( 16(CTX), e) + addm( 20(CTX), f) + addm( 24(CTX), g) + addm( 28(CTX), h) + + CMPQ _INP_END(SP), INP + JB done_hash + + XORQ SRND, SRND + +avx2_loop3: // Do second block using previously scheduled results + DO_ROUND_N_0(_XFER + 0*32 + 16, a, b, c, d, e, f, g, h, a) + DO_ROUND_N_1(_XFER + 0*32 + 16, h, a, b, c, d, e, f, g, h) + DO_ROUND_N_2(_XFER + 0*32 + 16, g, h, a, b, c, d, e, f, g) + DO_ROUND_N_3(_XFER + 0*32 + 16, f, g, h, a, b, c, d, e, f) + + DO_ROUND_N_0(_XFER + 1*32 + 16, e, f, g, h, a, b, c, d, e) + DO_ROUND_N_1(_XFER + 1*32 + 16, d, e, f, g, h, a, b, c, d) + DO_ROUND_N_2(_XFER + 1*32 + 16, c, d, e, f, g, h, a, b, c) + DO_ROUND_N_3(_XFER + 1*32 + 16, b, c, d, e, f, g, h, a, b) + + ADDQ $2*32, SRND + CMPQ SRND, $4*4*32 + JB avx2_loop3 + + MOVQ dig+0(FP), CTX // d.h[8] + MOVQ _INP(SP), INP + ADDQ $64, INP + + addm( 0(CTX), a) + addm( 4(CTX), b) + addm( 8(CTX), c) + addm( 12(CTX), d) + addm( 16(CTX), e) + addm( 20(CTX), f) + addm( 24(CTX), g) + addm( 28(CTX), h) + + CMPQ _INP_END(SP), INP + JA avx2_loop0 + JB done_hash + +avx2_do_last_block: + + VMOVDQU 
0(INP), XWORD0 + VMOVDQU 16(INP), XWORD1 + VMOVDQU 32(INP), XWORD2 + VMOVDQU 48(INP), XWORD3 + + MOVQ $flip_mask<>(SB), BP + VMOVDQU (BP), X_BYTE_FLIP_MASK + + VPSHUFB X_BYTE_FLIP_MASK, XWORD0, XWORD0 + VPSHUFB X_BYTE_FLIP_MASK, XWORD1, XWORD1 + VPSHUFB X_BYTE_FLIP_MASK, XWORD2, XWORD2 + VPSHUFB X_BYTE_FLIP_MASK, XWORD3, XWORD3 + + MOVQ $K256<>(SB), TBL + + JMP avx2_last_block_enter + +avx2_only_one_block: + // Load initial digest + MOVL 0(CTX), a // a = H0 + MOVL 4(CTX), b // b = H1 + MOVL 8(CTX), c // c = H2 + MOVL 12(CTX), d // d = H3 + MOVL 16(CTX), e // e = H4 + MOVL 20(CTX), f // f = H5 + MOVL 24(CTX), g // g = H6 + MOVL 28(CTX), h // h = H7 + + JMP avx2_do_last_block + +done_hash: + VZEROUPPER + RET + +// shuffle byte order from LE to BE +DATA flip_mask<>+0x00(SB)/8, $0x0405060700010203 +DATA flip_mask<>+0x08(SB)/8, $0x0c0d0e0f08090a0b +DATA flip_mask<>+0x10(SB)/8, $0x0405060700010203 +DATA flip_mask<>+0x18(SB)/8, $0x0c0d0e0f08090a0b +GLOBL flip_mask<>(SB), 8, $32 + +// shuffle xBxA -> 00BA +DATA shuff_00BA<>+0x00(SB)/8, $0x0b0a090803020100 +DATA shuff_00BA<>+0x08(SB)/8, $0xFFFFFFFFFFFFFFFF +DATA shuff_00BA<>+0x10(SB)/8, $0x0b0a090803020100 +DATA shuff_00BA<>+0x18(SB)/8, $0xFFFFFFFFFFFFFFFF +GLOBL shuff_00BA<>(SB), 8, $32 + +// shuffle xDxC -> DC00 +DATA shuff_DC00<>+0x00(SB)/8, $0xFFFFFFFFFFFFFFFF +DATA shuff_DC00<>+0x08(SB)/8, $0x0b0a090803020100 +DATA shuff_DC00<>+0x10(SB)/8, $0xFFFFFFFFFFFFFFFF +DATA shuff_DC00<>+0x18(SB)/8, $0x0b0a090803020100 +GLOBL shuff_DC00<>(SB), 8, $32 + +// Round specific constants +DATA K256<>+0x00(SB)/4, $0x428a2f98 // k1 +DATA K256<>+0x04(SB)/4, $0x71374491 // k2 +DATA K256<>+0x08(SB)/4, $0xb5c0fbcf // k3 +DATA K256<>+0x0c(SB)/4, $0xe9b5dba5 // k4 +DATA K256<>+0x10(SB)/4, $0x428a2f98 // k1 +DATA K256<>+0x14(SB)/4, $0x71374491 // k2 +DATA K256<>+0x18(SB)/4, $0xb5c0fbcf // k3 +DATA K256<>+0x1c(SB)/4, $0xe9b5dba5 // k4 + +DATA K256<>+0x20(SB)/4, $0x3956c25b // k5 - k8 +DATA K256<>+0x24(SB)/4, $0x59f111f1 +DATA K256<>+0x28(SB)/4, $0x923f82a4 +DATA K256<>+0x2c(SB)/4, $0xab1c5ed5 +DATA K256<>+0x30(SB)/4, $0x3956c25b +DATA K256<>+0x34(SB)/4, $0x59f111f1 +DATA K256<>+0x38(SB)/4, $0x923f82a4 +DATA K256<>+0x3c(SB)/4, $0xab1c5ed5 + +DATA K256<>+0x40(SB)/4, $0xd807aa98 // k9 - k12 +DATA K256<>+0x44(SB)/4, $0x12835b01 +DATA K256<>+0x48(SB)/4, $0x243185be +DATA K256<>+0x4c(SB)/4, $0x550c7dc3 +DATA K256<>+0x50(SB)/4, $0xd807aa98 +DATA K256<>+0x54(SB)/4, $0x12835b01 +DATA K256<>+0x58(SB)/4, $0x243185be +DATA K256<>+0x5c(SB)/4, $0x550c7dc3 + +DATA K256<>+0x60(SB)/4, $0x72be5d74 // k13 - k16 +DATA K256<>+0x64(SB)/4, $0x80deb1fe +DATA K256<>+0x68(SB)/4, $0x9bdc06a7 +DATA K256<>+0x6c(SB)/4, $0xc19bf174 +DATA K256<>+0x70(SB)/4, $0x72be5d74 +DATA K256<>+0x74(SB)/4, $0x80deb1fe +DATA K256<>+0x78(SB)/4, $0x9bdc06a7 +DATA K256<>+0x7c(SB)/4, $0xc19bf174 + +DATA K256<>+0x80(SB)/4, $0xe49b69c1 // k17 - k20 +DATA K256<>+0x84(SB)/4, $0xefbe4786 +DATA K256<>+0x88(SB)/4, $0x0fc19dc6 +DATA K256<>+0x8c(SB)/4, $0x240ca1cc +DATA K256<>+0x90(SB)/4, $0xe49b69c1 +DATA K256<>+0x94(SB)/4, $0xefbe4786 +DATA K256<>+0x98(SB)/4, $0x0fc19dc6 +DATA K256<>+0x9c(SB)/4, $0x240ca1cc + +DATA K256<>+0xa0(SB)/4, $0x2de92c6f // k21 - k24 +DATA K256<>+0xa4(SB)/4, $0x4a7484aa +DATA K256<>+0xa8(SB)/4, $0x5cb0a9dc +DATA K256<>+0xac(SB)/4, $0x76f988da +DATA K256<>+0xb0(SB)/4, $0x2de92c6f +DATA K256<>+0xb4(SB)/4, $0x4a7484aa +DATA K256<>+0xb8(SB)/4, $0x5cb0a9dc +DATA K256<>+0xbc(SB)/4, $0x76f988da + +DATA K256<>+0xc0(SB)/4, $0x983e5152 // k25 - k28 +DATA K256<>+0xc4(SB)/4, $0xa831c66d +DATA K256<>+0xc8(SB)/4, 
$0xb00327c8 +DATA K256<>+0xcc(SB)/4, $0xbf597fc7 +DATA K256<>+0xd0(SB)/4, $0x983e5152 +DATA K256<>+0xd4(SB)/4, $0xa831c66d +DATA K256<>+0xd8(SB)/4, $0xb00327c8 +DATA K256<>+0xdc(SB)/4, $0xbf597fc7 + +DATA K256<>+0xe0(SB)/4, $0xc6e00bf3 // k29 - k32 +DATA K256<>+0xe4(SB)/4, $0xd5a79147 +DATA K256<>+0xe8(SB)/4, $0x06ca6351 +DATA K256<>+0xec(SB)/4, $0x14292967 +DATA K256<>+0xf0(SB)/4, $0xc6e00bf3 +DATA K256<>+0xf4(SB)/4, $0xd5a79147 +DATA K256<>+0xf8(SB)/4, $0x06ca6351 +DATA K256<>+0xfc(SB)/4, $0x14292967 + +DATA K256<>+0x100(SB)/4, $0x27b70a85 +DATA K256<>+0x104(SB)/4, $0x2e1b2138 +DATA K256<>+0x108(SB)/4, $0x4d2c6dfc +DATA K256<>+0x10c(SB)/4, $0x53380d13 +DATA K256<>+0x110(SB)/4, $0x27b70a85 +DATA K256<>+0x114(SB)/4, $0x2e1b2138 +DATA K256<>+0x118(SB)/4, $0x4d2c6dfc +DATA K256<>+0x11c(SB)/4, $0x53380d13 + +DATA K256<>+0x120(SB)/4, $0x650a7354 +DATA K256<>+0x124(SB)/4, $0x766a0abb +DATA K256<>+0x128(SB)/4, $0x81c2c92e +DATA K256<>+0x12c(SB)/4, $0x92722c85 +DATA K256<>+0x130(SB)/4, $0x650a7354 +DATA K256<>+0x134(SB)/4, $0x766a0abb +DATA K256<>+0x138(SB)/4, $0x81c2c92e +DATA K256<>+0x13c(SB)/4, $0x92722c85 + +DATA K256<>+0x140(SB)/4, $0xa2bfe8a1 +DATA K256<>+0x144(SB)/4, $0xa81a664b +DATA K256<>+0x148(SB)/4, $0xc24b8b70 +DATA K256<>+0x14c(SB)/4, $0xc76c51a3 +DATA K256<>+0x150(SB)/4, $0xa2bfe8a1 +DATA K256<>+0x154(SB)/4, $0xa81a664b +DATA K256<>+0x158(SB)/4, $0xc24b8b70 +DATA K256<>+0x15c(SB)/4, $0xc76c51a3 + +DATA K256<>+0x160(SB)/4, $0xd192e819 +DATA K256<>+0x164(SB)/4, $0xd6990624 +DATA K256<>+0x168(SB)/4, $0xf40e3585 +DATA K256<>+0x16c(SB)/4, $0x106aa070 +DATA K256<>+0x170(SB)/4, $0xd192e819 +DATA K256<>+0x174(SB)/4, $0xd6990624 +DATA K256<>+0x178(SB)/4, $0xf40e3585 +DATA K256<>+0x17c(SB)/4, $0x106aa070 + +DATA K256<>+0x180(SB)/4, $0x19a4c116 +DATA K256<>+0x184(SB)/4, $0x1e376c08 +DATA K256<>+0x188(SB)/4, $0x2748774c +DATA K256<>+0x18c(SB)/4, $0x34b0bcb5 +DATA K256<>+0x190(SB)/4, $0x19a4c116 +DATA K256<>+0x194(SB)/4, $0x1e376c08 +DATA K256<>+0x198(SB)/4, $0x2748774c +DATA K256<>+0x19c(SB)/4, $0x34b0bcb5 + +DATA K256<>+0x1a0(SB)/4, $0x391c0cb3 +DATA K256<>+0x1a4(SB)/4, $0x4ed8aa4a +DATA K256<>+0x1a8(SB)/4, $0x5b9cca4f +DATA K256<>+0x1ac(SB)/4, $0x682e6ff3 +DATA K256<>+0x1b0(SB)/4, $0x391c0cb3 +DATA K256<>+0x1b4(SB)/4, $0x4ed8aa4a +DATA K256<>+0x1b8(SB)/4, $0x5b9cca4f +DATA K256<>+0x1bc(SB)/4, $0x682e6ff3 + +DATA K256<>+0x1c0(SB)/4, $0x748f82ee +DATA K256<>+0x1c4(SB)/4, $0x78a5636f +DATA K256<>+0x1c8(SB)/4, $0x84c87814 +DATA K256<>+0x1cc(SB)/4, $0x8cc70208 +DATA K256<>+0x1d0(SB)/4, $0x748f82ee +DATA K256<>+0x1d4(SB)/4, $0x78a5636f +DATA K256<>+0x1d8(SB)/4, $0x84c87814 +DATA K256<>+0x1dc(SB)/4, $0x8cc70208 + +DATA K256<>+0x1e0(SB)/4, $0x90befffa +DATA K256<>+0x1e4(SB)/4, $0xa4506ceb +DATA K256<>+0x1e8(SB)/4, $0xbef9a3f7 +DATA K256<>+0x1ec(SB)/4, $0xc67178f2 +DATA K256<>+0x1f0(SB)/4, $0x90befffa +DATA K256<>+0x1f4(SB)/4, $0xa4506ceb +DATA K256<>+0x1f8(SB)/4, $0xbef9a3f7 +DATA K256<>+0x1fc(SB)/4, $0xc67178f2 + +GLOBL K256<>(SB), (NOPTR + RODATA), $512 diff --git a/vendor/github.com/stevvooe/resumable/sha256/sha256block_decl.go b/vendor/github.com/stevvooe/resumable/sha256/sha256block_decl.go index a50c9787..fe07e53b 100644 --- a/vendor/github.com/stevvooe/resumable/sha256/sha256block_decl.go +++ b/vendor/github.com/stevvooe/resumable/sha256/sha256block_decl.go @@ -1,8 +1,8 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build 386 amd64 +// +build 386 amd64 s390x ppc64le package sha256 diff --git a/vendor/github.com/stevvooe/resumable/sha256/sha256block_generic.go b/vendor/github.com/stevvooe/resumable/sha256/sha256block_generic.go new file mode 100644 index 00000000..a182a5ea --- /dev/null +++ b/vendor/github.com/stevvooe/resumable/sha256/sha256block_generic.go @@ -0,0 +1,9 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64,!386,!s390x,!ppc64le + +package sha256 + +var block = blockGeneric diff --git a/vendor/github.com/stevvooe/resumable/sha256/sha256block_s390x.go b/vendor/github.com/stevvooe/resumable/sha256/sha256block_s390x.go new file mode 100644 index 00000000..b7beefef --- /dev/null +++ b/vendor/github.com/stevvooe/resumable/sha256/sha256block_s390x.go @@ -0,0 +1,12 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha256 + +// featureCheck reports whether the CPU supports the +// SHA256 compute intermediate message digest (KIMD) +// function code. +func featureCheck() bool + +var useAsm = featureCheck() diff --git a/vendor/github.com/stevvooe/resumable/sha256/sha256block_s390x.s b/vendor/github.com/stevvooe/resumable/sha256/sha256block_s390x.s new file mode 100644 index 00000000..ee35991f --- /dev/null +++ b/vendor/github.com/stevvooe/resumable/sha256/sha256block_s390x.s @@ -0,0 +1,34 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +// func featureCheck() bool +TEXT ·featureCheck(SB),NOSPLIT,$16-1 + LA tmp-16(SP), R1 + XOR R0, R0 // query function code is 0 + WORD $0xB93E0006 // KIMD (R6 is ignored) + MOVBZ tmp-16(SP), R4 // get the first byte + AND $0x20, R4 // bit 2 (big endian) for SHA256 + CMPBEQ R4, $0, nosha256 + MOVB $1, ret+0(FP) + RET +nosha256: + MOVB $0, ret+0(FP) + RET + +// func block(dig *digest, p []byte) +TEXT ·block(SB),NOSPLIT,$0-32 + MOVBZ ·useAsm(SB), R4 + LMG dig+0(FP), R1, R3 // R2 = &p[0], R3 = len(p) + CMPBNE R4, $1, generic + MOVBZ $2, R0 // SHA256 function code +loop: + WORD $0xB93E0002 // KIMD R2 + BVS loop // continue if interrupted +done: + XOR R0, R0 // restore R0 + RET +generic: + BR ·blockGeneric(SB) diff --git a/vendor/github.com/stevvooe/resumable/sha512/resume.go b/vendor/github.com/stevvooe/resumable/sha512/resume.go index 55b433e7..ed9173e5 100644 --- a/vendor/github.com/stevvooe/resumable/sha512/resume.go +++ b/vendor/github.com/stevvooe/resumable/sha512/resume.go @@ -2,8 +2,11 @@ package sha512 import ( "bytes" + "crypto" "encoding/gob" + "github.com/stevvooe/resumable" + // import to ensure that our init function runs after the standard package _ "crypto/sha512" ) @@ -21,7 +24,7 @@ func (d *digest) State() ([]byte, error) { // We encode this way so that we do not have // to export these fields of the digest struct. vals := []interface{}{ - d.h, d.x, d.nx, d.len, d.is384, + d.h, d.x, d.nx, d.len, d.function, } for _, val := range vals { @@ -40,7 +43,7 @@ func (d *digest) Restore(state []byte) error { // We decode this way so that we do not have // to export these fields of the digest struct. 
vals := []interface{}{ - &d.h, &d.x, &d.nx, &d.len, &d.is384, + &d.h, &d.x, &d.nx, &d.len, &d.function, } for _, val := range vals { @@ -49,5 +52,12 @@ func (d *digest) Restore(state []byte) error { } } + switch d.function { + case crypto.SHA384, crypto.SHA512, crypto.SHA512_224, crypto.SHA512_256: + break + default: + return resumable.ErrBadState + } + return nil } diff --git a/vendor/github.com/stevvooe/resumable/sha512/sha512.go b/vendor/github.com/stevvooe/resumable/sha512/sha512.go index bca7a91e..5603c90f 100644 --- a/vendor/github.com/stevvooe/resumable/sha512/sha512.go +++ b/vendor/github.com/stevvooe/resumable/sha512/sha512.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package sha512 implements the SHA384 and SHA512 hash algorithms as defined -// in FIPS 180-2. +// Package sha512 implements the SHA-384, SHA-512, SHA-512/224, and SHA-512/256 +// hash algorithms as defined in FIPS 180-4. package sha512 import ( @@ -14,16 +14,27 @@ import ( func init() { crypto.RegisterHash(crypto.SHA384, New384) crypto.RegisterHash(crypto.SHA512, New) + crypto.RegisterHash(crypto.SHA512_224, New512_224) + crypto.RegisterHash(crypto.SHA512_256, New512_256) } -// The size of a SHA512 checksum in bytes. -const Size = 64 +const ( + // Size is the size, in bytes, of a SHA-512 checksum. + Size = 64 -// The size of a SHA384 checksum in bytes. -const Size384 = 48 + // Size224 is the size, in bytes, of a SHA-512/224 checksum. + Size224 = 28 -// The blocksize of SHA512 and SHA384 in bytes. -const BlockSize = 128 + // Size256 is the size, in bytes, of a SHA-512/256 checksum. + Size256 = 32 + + // Size384 is the size, in bytes, of a SHA-384 checksum. + Size384 = 48 + + // BlockSize is the block size, in bytes, of the SHA-512/224, + // SHA-512/256, SHA-384 and SHA-512 hash functions. + BlockSize = 128 +) const ( chunk = 128 @@ -35,6 +46,22 @@ const ( init5 = 0x9b05688c2b3e6c1f init6 = 0x1f83d9abfb41bd6b init7 = 0x5be0cd19137e2179 + init0_224 = 0x8c3d37c819544da2 + init1_224 = 0x73e1996689dcd4d6 + init2_224 = 0x1dfab7ae32ff9c82 + init3_224 = 0x679dd514582f9fcf + init4_224 = 0x0f6d2b697bd44da8 + init5_224 = 0x77e36f7304c48942 + init6_224 = 0x3f9d85a86a1d36c8 + init7_224 = 0x1112e6ad91d692a1 + init0_256 = 0x22312194fc2bf72c + init1_256 = 0x9f555fa3c84c64c2 + init2_256 = 0x2393b86b6f53b151 + init3_256 = 0x963877195940eabd + init4_256 = 0x96283ee2a88effe3 + init5_256 = 0xbe5e1e2553863992 + init6_256 = 0x2b0199fc2c85b8aa + init7_256 = 0x0eb72ddc81c52ca2 init0_384 = 0xcbbb9d5dc1059ed8 init1_384 = 0x629a292a367cd507 init2_384 = 0x9159015a3070dd17 @@ -47,24 +74,16 @@ const ( // digest represents the partial evaluation of a checksum. 
type digest struct { - h [8]uint64 - x [chunk]byte - nx int - len uint64 - is384 bool // mark if this digest is SHA-384 + h [8]uint64 + x [chunk]byte + nx int + len uint64 + function crypto.Hash } func (d *digest) Reset() { - if !d.is384 { - d.h[0] = init0 - d.h[1] = init1 - d.h[2] = init2 - d.h[3] = init3 - d.h[4] = init4 - d.h[5] = init5 - d.h[6] = init6 - d.h[7] = init7 - } else { + switch d.function { + case crypto.SHA384: d.h[0] = init0_384 d.h[1] = init1_384 d.h[2] = init2_384 @@ -73,31 +92,77 @@ func (d *digest) Reset() { d.h[5] = init5_384 d.h[6] = init6_384 d.h[7] = init7_384 + case crypto.SHA512_224: + d.h[0] = init0_224 + d.h[1] = init1_224 + d.h[2] = init2_224 + d.h[3] = init3_224 + d.h[4] = init4_224 + d.h[5] = init5_224 + d.h[6] = init6_224 + d.h[7] = init7_224 + case crypto.SHA512_256: + d.h[0] = init0_256 + d.h[1] = init1_256 + d.h[2] = init2_256 + d.h[3] = init3_256 + d.h[4] = init4_256 + d.h[5] = init5_256 + d.h[6] = init6_256 + d.h[7] = init7_256 + default: + d.h[0] = init0 + d.h[1] = init1 + d.h[2] = init2 + d.h[3] = init3 + d.h[4] = init4 + d.h[5] = init5 + d.h[6] = init6 + d.h[7] = init7 } d.nx = 0 d.len = 0 } -// New returns a new hash.Hash computing the SHA512 checksum. +// New returns a new hash.Hash computing the SHA-512 checksum. func New() hash.Hash { - d := new(digest) + d := &digest{function: crypto.SHA512} d.Reset() return d } -// New384 returns a new hash.Hash computing the SHA384 checksum. +// New512_224 returns a new hash.Hash computing the SHA-512/224 checksum. +func New512_224() hash.Hash { + d := &digest{function: crypto.SHA512_224} + d.Reset() + return d +} + +// New512_256 returns a new hash.Hash computing the SHA-512/256 checksum. +func New512_256() hash.Hash { + d := &digest{function: crypto.SHA512_256} + d.Reset() + return d +} + +// New384 returns a new hash.Hash computing the SHA-384 checksum. func New384() hash.Hash { - d := new(digest) - d.is384 = true + d := &digest{function: crypto.SHA384} d.Reset() return d } func (d *digest) Size() int { - if !d.is384 { + switch d.function { + case crypto.SHA512_224: + return Size224 + case crypto.SHA512_256: + return Size256 + case crypto.SHA384: + return Size384 + default: return Size } - return Size384 } func (d *digest) BlockSize() int { return BlockSize } @@ -130,14 +195,20 @@ func (d0 *digest) Sum(in []byte) []byte { d := new(digest) *d = *d0 hash := d.checkSum() - if d.is384 { + switch d.function { + case crypto.SHA384: return append(in, hash[:Size384]...) + case crypto.SHA512_224: + return append(in, hash[:Size224]...) + case crypto.SHA512_256: + return append(in, hash[:Size256]...) + default: + return append(in, hash[:]...) } - return append(in, hash[:]...) } func (d *digest) checkSum() [Size]byte { - // Padding. Add a 1 bit and 0 bits until 112 bytes mod 128. + // Padding. Add a 1 bit and 0 bits until 112 bytes mod 128. len := d.len var tmp [128]byte tmp[0] = 0x80 @@ -159,7 +230,7 @@ func (d *digest) checkSum() [Size]byte { } h := d.h[:] - if d.is384 { + if d.function == crypto.SHA384 { h = d.h[:6] } @@ -180,7 +251,7 @@ func (d *digest) checkSum() [Size]byte { // Sum512 returns the SHA512 checksum of the data. func Sum512(data []byte) [Size]byte { - var d digest + d := digest{function: crypto.SHA512} d.Reset() d.Write(data) return d.checkSum() @@ -188,11 +259,30 @@ func Sum512(data []byte) [Size]byte { // Sum384 returns the SHA384 checksum of the data. 
func Sum384(data []byte) (sum384 [Size384]byte) { - var d digest - d.is384 = true + d := digest{function: crypto.SHA384} d.Reset() d.Write(data) sum := d.checkSum() copy(sum384[:], sum[:Size384]) return } + +// Sum512_224 returns the Sum512/224 checksum of the data. +func Sum512_224(data []byte) (sum224 [Size224]byte) { + d := digest{function: crypto.SHA512_224} + d.Reset() + d.Write(data) + sum := d.checkSum() + copy(sum224[:], sum[:Size224]) + return +} + +// Sum512_256 returns the Sum512/256 checksum of the data. +func Sum512_256(data []byte) (sum256 [Size256]byte) { + d := digest{function: crypto.SHA512_256} + d.Reset() + d.Write(data) + sum := d.checkSum() + copy(sum256[:], sum[:Size256]) + return +} diff --git a/vendor/github.com/stevvooe/resumable/sha512/sha512block.go b/vendor/github.com/stevvooe/resumable/sha512/sha512block.go index 648ae8f7..42e8d19f 100644 --- a/vendor/github.com/stevvooe/resumable/sha512/sha512block.go +++ b/vendor/github.com/stevvooe/resumable/sha512/sha512block.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !amd64 - // SHA512 block step. // In its own file so that a faster assembly or C version // can be substituted easily. @@ -93,7 +91,7 @@ var _K = []uint64{ 0x6c44198c4a475817, } -func block(dig *digest, p []byte) { +func blockGeneric(dig *digest, p []byte) { var w [80]uint64 h0, h1, h2, h3, h4, h5, h6, h7 := dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] for len(p) >= chunk { diff --git a/vendor/github.com/stevvooe/resumable/sha512/sha512block_amd64.s b/vendor/github.com/stevvooe/resumable/sha512/sha512block_amd64.s index 2e10233d..87502cdf 100644 --- a/vendor/github.com/stevvooe/resumable/sha512/sha512block_amd64.s +++ b/vendor/github.com/stevvooe/resumable/sha512/sha512block_amd64.s @@ -1,4 +1,4 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/stevvooe/resumable/sha512/sha512block_decl.go b/vendor/github.com/stevvooe/resumable/sha512/sha512block_decl.go index bef99de2..8194506b 100644 --- a/vendor/github.com/stevvooe/resumable/sha512/sha512block_decl.go +++ b/vendor/github.com/stevvooe/resumable/sha512/sha512block_decl.go @@ -1,8 +1,8 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build amd64 +// +build amd64 s390x ppc64le package sha512 diff --git a/vendor/github.com/stevvooe/resumable/sha512/sha512block_generic.go b/vendor/github.com/stevvooe/resumable/sha512/sha512block_generic.go new file mode 100644 index 00000000..08f2e071 --- /dev/null +++ b/vendor/github.com/stevvooe/resumable/sha512/sha512block_generic.go @@ -0,0 +1,9 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !amd64,!s390x,!ppc64le + +package sha512 + +var block = blockGeneric diff --git a/vendor/github.com/stevvooe/resumable/sha512/sha512block_s390x.go b/vendor/github.com/stevvooe/resumable/sha512/sha512block_s390x.go new file mode 100644 index 00000000..f05dc18e --- /dev/null +++ b/vendor/github.com/stevvooe/resumable/sha512/sha512block_s390x.go @@ -0,0 +1,12 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sha512 + +// featureCheck reports whether the CPU supports the +// SHA512 compute intermediate message digest (KIMD) +// function code. +func featureCheck() bool + +var useAsm = featureCheck() diff --git a/vendor/github.com/stevvooe/resumable/sha512/sha512block_s390x.s b/vendor/github.com/stevvooe/resumable/sha512/sha512block_s390x.s new file mode 100644 index 00000000..aab81e2b --- /dev/null +++ b/vendor/github.com/stevvooe/resumable/sha512/sha512block_s390x.s @@ -0,0 +1,34 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +// func featureCheck() bool +TEXT ·featureCheck(SB),NOSPLIT,$16-1 + LA tmp-16(SP), R1 + XOR R0, R0 // query function code is 0 + WORD $0xB93E0006 // KIMD (R6 is ignored) + MOVBZ tmp-16(SP), R4 // get the first byte + AND $0x10, R4 // bit 3 (big endian) for SHA512 + CMPBEQ R4, $0, nosha512 + MOVB $1, ret+0(FP) + RET +nosha512: + MOVB $0, ret+0(FP) + RET + +// func block(dig *digest, p []byte) +TEXT ·block(SB),NOSPLIT,$0-32 + MOVBZ ·useAsm(SB), R4 + LMG dig+0(FP), R1, R3 // R2 = &p[0], R3 = len(p) + CMPBNE R4, $1, generic + MOVBZ $3, R0 // SHA512 function code +loop: + WORD $0xB93E0002 // KIMD R2 + BVS loop // continue if interrupted +done: + XOR R0, R0 // restore R0 + RET +generic: + BR ·blockGeneric(SB) From 5573a13f151db5bf2c95b95f31849d2e2b79fc08 Mon Sep 17 00:00:00 2001 From: Nycholas de Oliveira e Oliveira Date: Wed, 17 May 2017 12:02:31 -0300 Subject: [PATCH 037/276] Update README.md Signed-off-by: Nycholas de Oliveira e Oliveira --- contrib/compose/README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/contrib/compose/README.md b/contrib/compose/README.md index a9522fd2..45050b70 100644 --- a/contrib/compose/README.md +++ b/contrib/compose/README.md @@ -123,13 +123,13 @@ to the 1.0 registry. Requests from newer clients will route to the 2.0 registry. 4. Use `curl` to list the image in the registry. - $ curl -v -X GET http://localhost:32777/v2/registry1/tags/list + $ curl -v -X GET http://localhost:5000/v2/registry_one/tags/list * Hostname was NOT found in DNS cache * Trying 127.0.0.1... * Connected to localhost (127.0.0.1) port 32777 (#0) > GET /v2/registry1/tags/list HTTP/1.1 > User-Agent: curl/7.36.0 - > Host: localhost:32777 + > Host: localhost:5000 > Accept: */* > < HTTP/1.1 200 OK @@ -138,7 +138,7 @@ to the 1.0 registry. Requests from newer clients will route to the 2.0 registry. < Date: Tue, 14 Apr 2015 22:34:13 GMT < Content-Length: 39 < - {"name":"registry1","tags":["latest"]} + {"name":"registry_one","tags":["latest"]} * Connection #0 to host localhost left intact This example refers to the specific port assigned to the 2.0 registry. You saw From 646fc9702ca43365be96ef2127be0bac8e0b0817 Mon Sep 17 00:00:00 2001 From: Elton Stoneman Date: Tue, 23 May 2017 09:58:05 +0100 Subject: [PATCH 038/276] Add ARGs for cross-compiling Add build args. 
Defaults to linux/amd64, so the existing image is unchanged, but the builder
can now target other platforms, e.g.

```
docker build --build-arg GOOS=windows -t distribution-builder:windows .
```

Signed-off-by: Elton Stoneman
---
 Dockerfile | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/Dockerfile b/Dockerfile
index b0d06f12..ac8dbca2 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -3,6 +3,9 @@ FROM golang:1.8-alpine
 ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution
 ENV DOCKER_BUILDTAGS include_oss include_gcs
 
+ARG GOOS=linux
+ARG GOARCH=amd64
+
 RUN set -ex \
     && apk add --no-cache make git

From b0f98e9382d19e10c36572e9a89ba8388efb9d54 Mon Sep 17 00:00:00 2001
From: Misty Stanley-Jones
Date: Tue, 23 May 2017 11:13:38 -0700
Subject: [PATCH 039/276] Put architecture.md back into distribution repo

Signed-off-by: Misty Stanley-Jones
---
 docs/architecture.md | 52 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 52 insertions(+)
 create mode 100644 docs/architecture.md

diff --git a/docs/architecture.md b/docs/architecture.md
new file mode 100644
index 00000000..c2aaa9f2
--- /dev/null
+++ b/docs/architecture.md
@@ -0,0 +1,52 @@
+---
+published: false
+---
+
+# Architecture
+
+## Design
+**TODO(stevvooe):** Discuss the architecture of the registry, internally and externally, in a few different deployment scenarios.
+
+### Eventual Consistency
+
+> **NOTE:** This section belongs somewhere, perhaps in a design document. We
+> are leaving this here so the information is not lost.
+
+Running the registry on eventually consistent backends has been part of the
+design from the beginning. This section covers some of the approaches to
+dealing with this reality.
+
+There are a few classes of issues that we need to worry about when
+implementing something on top of the storage drivers:
+
+1. Read-After-Write consistency (see this [article on
+   s3](http://shlomoswidler.com/2009/12/read-after-write-consistency-in-amazon.html)).
+2. [Write-Write Conflicts](http://en.wikipedia.org/wiki/Write%E2%80%93write_conflict).
+
+In reality, the registry must worry about these kinds of errors when doing the
+following:
+
+1. Accepting data into a temporary upload file that may not yet contain the
+   latest data block when read back (read-after-write).
+2. Moving uploaded data into its blob location (write-write race).
+3. Modifying the "current" manifest for a given tag (write-write race).
+4. A whole slew of operations around deletes (read-after-write, delete-write
+   races, garbage collection, etc.).
+
+The backend path layout employs a few techniques to avoid these problems:
+
+1. Large writes are done to private upload directories, so that each upload
+   location has a single writer and most of the corruption potential of
+   concurrent writers is avoided.
+2. Constraints in storage driver implementations, such as support for writing
+   after the end of a file to extend it.
+3. Digest verification to avoid data corruption.
+4. Manifest files are stored by digest and cannot change.
+5. All other non-content files (links, hashes, etc.) are written as an atomic
+   unit. Anything that requires additions and deletions is broken out into
+   separate "files". Last writer still wins.
+
+Unfortunately, one must play this game when trying to build something like
+this on top of eventually consistent storage systems. If we run into serious
+problems, we can wrap the storage drivers in a shared consistency layer, but
+that would increase complexity and hinder registry cluster performance.
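To make the first technique above concrete, here is a minimal sketch of the
upload-then-move pattern. It is illustrative only: the helper name and the
blob path layout are assumptions, not the registry's actual code. Data is
staged in a private upload location, verified against its digest, and only
then moved into a content-addressed location in a single operation:

```go
package storage

import (
	"fmt"
	"path"

	"github.com/docker/distribution/context"
	storagedriver "github.com/docker/distribution/registry/storage/driver"
	"github.com/opencontainers/go-digest"
)

// commitBlob is a hypothetical helper, not part of the registry. It
// verifies a staged upload against the expected digest and then moves it
// into a content-addressed location with a single Move call.
func commitBlob(ctx context.Context, driver storagedriver.StorageDriver, uploadPath string, expected digest.Digest) error {
	// Read back what was staged in the private upload directory. On an
	// eventually consistent backend this read may be stale
	// (read-after-write), which is exactly what the digest check catches.
	p, err := driver.GetContent(ctx, uploadPath)
	if err != nil {
		return err
	}
	if actual := digest.FromBytes(p); actual != expected {
		return fmt.Errorf("staged upload digest mismatch: got %v, want %v", actual, expected)
	}
	// The destination path is derived from the digest (the path prefix
	// here is made up for the example), so two racing uploaders can only
	// ever move identical bytes into it.
	blobPath := path.Join("/illustrative/blobs", expected.Algorithm().String(), expected.Hex())
	return driver.Move(ctx, uploadPath, blobPath)
}
```

Because the final location is content-addressed and written only by a single
atomic move, a write-write race degenerates into two writers placing the same
bytes, which is why last-writer-wins is safe at that path.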
From 258345ba0d6d530babbeec91d7ae17dcdee7206f Mon Sep 17 00:00:00 2001
From: Oleg Bulatov
Date: Fri, 2 Jun 2017 13:17:42 +0200
Subject: [PATCH 040/276] Fix signalling Wait in regulator.enter

In some conditions, regulator.exit may not send a signal to a blocked
regulator.enter.

Let's assume we are in the critical section of regulator.exit and
r.available is equal to 0, and that there are three more goroutines: one
also executes regulator.exit and waits for the lock, while the rest run
regulator.enter and wait for the signal. We send the signal, and after
releasing the lock, there will be lock contention:

  1. Wait from regulator.enter
  2. Lock from regulator.exit

If the winner is Lock from regulator.exit, we will not send another
signal to unlock the second Wait.

Signed-off-by: Oleg Bulatov
---
 registry/storage/driver/base/regulator.go  |  6 +-
 .../storage/driver/base/regulator_test.go   | 67 +++++++++++++++++++
 2 files changed, 68 insertions(+), 5 deletions(-)
 create mode 100644 registry/storage/driver/base/regulator_test.go

diff --git a/registry/storage/driver/base/regulator.go b/registry/storage/driver/base/regulator.go
index 185160a4..1e929f83 100644
--- a/registry/storage/driver/base/regulator.go
+++ b/registry/storage/driver/base/regulator.go
@@ -38,11 +38,7 @@ func (r *regulator) enter() {
 
 func (r *regulator) exit() {
 	r.L.Lock()
-	// We only need to signal to a waiting FS operation if we're already at the
-	// limit of threads used
-	if r.available == 0 {
-		r.Signal()
-	}
+	r.Signal()
 	r.available++
 	r.L.Unlock()
 }
diff --git a/registry/storage/driver/base/regulator_test.go b/registry/storage/driver/base/regulator_test.go
new file mode 100644
index 00000000..e4c0ad58
--- /dev/null
+++ b/registry/storage/driver/base/regulator_test.go
@@ -0,0 +1,67 @@
+package base
+
+import (
+	"sync"
+	"testing"
+	"time"
+)
+
+func TestRegulatorEnterExit(t *testing.T) {
+	const limit = 500
+
+	r := NewRegulator(nil, limit).(*regulator)
+
+	for try := 0; try < 50; try++ {
+		run := make(chan struct{})
+
+		var firstGroupReady sync.WaitGroup
+		var firstGroupDone sync.WaitGroup
+		firstGroupReady.Add(limit)
+		firstGroupDone.Add(limit)
+		for i := 0; i < limit; i++ {
+			go func() {
+				r.enter()
+				firstGroupReady.Done()
+				<-run
+				r.exit()
+				firstGroupDone.Done()
+			}()
+		}
+		firstGroupReady.Wait()
+
+		// now that the limit is exhausted, start a few more goroutines
+		var secondGroupReady sync.WaitGroup
+		var secondGroupDone sync.WaitGroup
+		for i := 0; i < 50; i++ {
+			secondGroupReady.Add(1)
+			secondGroupDone.Add(1)
+			go func() {
+				secondGroupReady.Done()
+				r.enter()
+				r.exit()
+				secondGroupDone.Done()
+			}()
+		}
+		secondGroupReady.Wait()
+
+		// allow the first group to return resources
+		close(run)
+
+		done := make(chan struct{})
+		go func() {
+			secondGroupDone.Wait()
+			close(done)
+		}()
+		select {
+		case <-done:
+		case <-time.After(5 * time.Second):
+			t.Fatal("some r.enter() calls are still blocked")
+		}
+
+		firstGroupDone.Wait()
+
+		if r.available != limit {
+			t.Fatalf("r.available: got %d, want %d", r.available, limit)
+		}
+	}
+}

From 5b1b6afae2b9852521166365e537cdcc8f204f85 Mon Sep 17 00:00:00 2001
From: Oleg Bulatov
Date: Fri, 16 Jun 2017 15:51:29 +0200
Subject: [PATCH 041/276] Update Docker-Content-Digest if manifest list is
 rewritten

If the client doesn't support manifest lists, the registry will rewrite
a manifest list into the old format. The Docker-Content-Digest header
should be updated in this case.
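As a sketch of the idea (the helper and its names are assumed for
illustration, not the patched handler code), the header must be derived from
the payload that is actually written to the client:

```go
import (
	"net/http"

	"github.com/docker/distribution"
	"github.com/opencontainers/go-digest"
)

// writeManifest is a hypothetical helper: it serves a (possibly
// rewritten) manifest and advertises the digest of the bytes that are
// actually sent.
func writeManifest(w http.ResponseWriter, manifest distribution.Manifest) error {
	mediaType, payload, err := manifest.Payload()
	if err != nil {
		return err
	}
	// Recompute the digest over the served payload; after a rewrite it
	// differs from the digest of the original manifest list.
	dgst := digest.FromBytes(payload)
	w.Header().Set("Content-Type", mediaType)
	w.Header().Set("Docker-Content-Digest", dgst.String())
	_, err = w.Write(payload)
	return err
}
```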
Signed-off-by: Oleg Bulatov
---
 registry/handlers/manifests.go | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/registry/handlers/manifests.go b/registry/handlers/manifests.go
index 75e3d1dd..f406903f 100644
--- a/registry/handlers/manifests.go
+++ b/registry/handlers/manifests.go
@@ -183,6 +183,8 @@ func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request)
 		if err != nil {
 			return
 		}
+	} else {
+		imh.Digest = manifestDigest
 	}
 }

From 5ccd03d28ae2b23a3b2863216bcb97e9f650f6d2 Mon Sep 17 00:00:00 2001
From: Ina Panova
Date: Fri, 23 Jun 2017 12:18:01 +0200
Subject: [PATCH 042/276] Fixing image manifest schema2 mediaType typo in
 docs.

Signed-off-by: Ina Panova
---
 docs/spec/manifest-v2-2.md | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/docs/spec/manifest-v2-2.md b/docs/spec/manifest-v2-2.md
index b1337f08..2319cf6d 100644
--- a/docs/spec/manifest-v2-2.md
+++ b/docs/spec/manifest-v2-2.md
@@ -60,8 +60,8 @@ image manifest based on the Content-Type returned in the HTTP response.
     - **`mediaType`** *string*
 
         The MIME type of the referenced object. This will generally be
-        `application/vnd.docker.image.manifest.v2+json`, but it could also
-        be `application/vnd.docker.image.manifest.v1+json` if the manifest
+        `application/vnd.docker.distribution.manifest.v2+json`, but it could also
+        be `application/vnd.docker.distribution.manifest.v1+json` if the manifest
         list references a legacy schema-1 manifest.
 
     - **`size`** *int*
@@ -123,7 +123,7 @@ image manifest based on the Content-Type returned in the HTTP response.
   "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
   "manifests": [
     {
-      "mediaType": "application/vnd.docker.image.manifest.v2+json",
+      "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
       "size": 7143,
      "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
       "platform": {
         "architecture": "ppc64le",
         "os": "linux",
} }, { - "mediaType": "application/vnd.docker.image.manifest.v2+json", + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", "size": 7682, "digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270", "platform": { From a97d7c0c155be14d1380c075d3197a5d89c2abc2 Mon Sep 17 00:00:00 2001 From: Igor Morozov Date: Fri, 23 Jun 2017 12:45:04 -0700 Subject: [PATCH 043/276] moved Sirupsen to sirupsen on a case sensitive system Signed-off-by: Igor Morozov --- BUILDING.md | 2 +- configuration/parser.go | 2 +- context/http.go | 2 +- context/logger.go | 2 +- contrib/token-server/main.go | 2 +- manifest/schema1/verify.go | 2 +- notifications/sinks.go | 2 +- notifications/sinks_test.go | 2 +- registry/auth/token/token.go | 2 +- registry/client/auth/session.go | 2 +- registry/handlers/app.go | 2 +- registry/handlers/hooks.go | 2 +- registry/registry.go | 2 +- registry/storage/blobwriter.go | 2 +- registry/storage/blobwriter_resumable.go | 2 +- registry/storage/driver/gcs/gcs.go | 2 +- registry/storage/driver/oss/oss.go | 2 +- .../storage/driver/s3-aws/s3_v2_signer.go | 2 +- registry/storage/purgeuploads.go | 2 +- vendor.conf | 4 +- .../Sirupsen/logrus/json_formatter.go | 41 ---- .../Sirupsen/logrus/terminal_solaris.go | 15 -- .../Sirupsen/logrus/terminal_windows.go | 27 --- .../logrus-logstash-hook/README.md | 19 +- .../logrus-logstash-hook/logstash.go | 8 +- .../logstash_formatter.go | 7 +- .../{Sirupsen => sirupsen}/logrus/LICENSE | 0 .../{Sirupsen => sirupsen}/logrus/README.md | 184 ++++++++++++------ .../{Sirupsen => sirupsen}/logrus/alt_exit.go | 2 +- .../{Sirupsen => sirupsen}/logrus/doc.go | 4 +- .../{Sirupsen => sirupsen}/logrus/entry.go | 36 ++-- .../{Sirupsen => sirupsen}/logrus/exported.go | 4 +- .../logrus/formatter.go | 0 .../{Sirupsen => sirupsen}/logrus/hooks.go | 0 .../sirupsen/logrus/json_formatter.go | 74 +++++++ .../{Sirupsen => sirupsen}/logrus/logger.go | 51 +++-- .../{Sirupsen => sirupsen}/logrus/logrus.go | 2 +- .../logrus/terminal_appengine.go | 4 +- .../logrus/terminal_bsd.go | 0 .../logrus/terminal_linux.go | 0 .../logrus/terminal_notwindows.go | 14 +- .../sirupsen/logrus/terminal_solaris.go | 21 ++ .../sirupsen/logrus/terminal_windows.go | 82 ++++++++ .../logrus/text_formatter.go | 55 ++++-- .../{Sirupsen => sirupsen}/logrus/writer.go | 29 ++- 45 files changed, 474 insertions(+), 247 deletions(-) delete mode 100644 vendor/github.com/Sirupsen/logrus/json_formatter.go delete mode 100644 vendor/github.com/Sirupsen/logrus/terminal_solaris.go delete mode 100644 vendor/github.com/Sirupsen/logrus/terminal_windows.go rename vendor/github.com/{Sirupsen => sirupsen}/logrus/LICENSE (100%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/README.md (73%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/alt_exit.go (96%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/doc.go (83%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/entry.go (89%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/exported.go (99%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/formatter.go (100%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/hooks.go (100%) create mode 100644 vendor/github.com/sirupsen/logrus/json_formatter.go rename vendor/github.com/{Sirupsen => sirupsen}/logrus/logger.go (88%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/logrus.go (99%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/terminal_appengine.go (71%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/terminal_bsd.go (100%) 
rename vendor/github.com/{Sirupsen => sirupsen}/logrus/terminal_linux.go (100%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/terminal_notwindows.go (60%) create mode 100644 vendor/github.com/sirupsen/logrus/terminal_solaris.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_windows.go rename vendor/github.com/{Sirupsen => sirupsen}/logrus/text_formatter.go (73%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/writer.go (54%) diff --git a/BUILDING.md b/BUILDING.md index d52ed0d9..c5982818 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -83,7 +83,7 @@ build: + lint + build github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar - github.com/Sirupsen/logrus + github.com/sirupsen/logrus github.com/docker/libtrust ... github.com/yvasiyarov/gorelic diff --git a/configuration/parser.go b/configuration/parser.go index 8b81dd5d..b46f7326 100644 --- a/configuration/parser.go +++ b/configuration/parser.go @@ -8,7 +8,7 @@ import ( "strconv" "strings" - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" "gopkg.in/yaml.v2" ) diff --git a/context/http.go b/context/http.go index 7fe9b8ab..7d65c852 100644 --- a/context/http.go +++ b/context/http.go @@ -8,9 +8,9 @@ import ( "sync" "time" - log "github.com/Sirupsen/logrus" "github.com/docker/distribution/uuid" "github.com/gorilla/mux" + log "github.com/sirupsen/logrus" ) // Common errors used with this package. diff --git a/context/logger.go b/context/logger.go index fbb6a051..86c5964e 100644 --- a/context/logger.go +++ b/context/logger.go @@ -3,7 +3,7 @@ package context import ( "fmt" - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" "runtime" ) diff --git a/contrib/token-server/main.go b/contrib/token-server/main.go index e9d6d64f..bfbb470b 100644 --- a/contrib/token-server/main.go +++ b/contrib/token-server/main.go @@ -9,13 +9,13 @@ import ( "strings" "time" - "github.com/Sirupsen/logrus" "github.com/docker/distribution/context" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/auth" _ "github.com/docker/distribution/registry/auth/htpasswd" "github.com/docker/libtrust" "github.com/gorilla/mux" + "github.com/sirupsen/logrus" ) var ( diff --git a/manifest/schema1/verify.go b/manifest/schema1/verify.go index fa8daa56..ef59065c 100644 --- a/manifest/schema1/verify.go +++ b/manifest/schema1/verify.go @@ -3,8 +3,8 @@ package schema1 import ( "crypto/x509" - "github.com/Sirupsen/logrus" "github.com/docker/libtrust" + "github.com/sirupsen/logrus" ) // Verify verifies the signature of the signed manifest returning the public diff --git a/notifications/sinks.go b/notifications/sinks.go index beb8bad4..e1523df7 100644 --- a/notifications/sinks.go +++ b/notifications/sinks.go @@ -6,7 +6,7 @@ import ( "sync" "time" - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" ) // NOTE(stevvooe): This file contains definitions for several utility sinks. 
diff --git a/notifications/sinks_test.go b/notifications/sinks_test.go index 1bfa12c6..2a85c4f4 100644 --- a/notifications/sinks_test.go +++ b/notifications/sinks_test.go @@ -7,7 +7,7 @@ import ( "sync" "time" - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" "testing" ) diff --git a/registry/auth/token/token.go b/registry/auth/token/token.go index 850f5813..7f87d496 100644 --- a/registry/auth/token/token.go +++ b/registry/auth/token/token.go @@ -10,8 +10,8 @@ import ( "strings" "time" - log "github.com/Sirupsen/logrus" "github.com/docker/libtrust" + log "github.com/sirupsen/logrus" "github.com/docker/distribution/registry/auth" ) diff --git a/registry/client/auth/session.go b/registry/client/auth/session.go index 3ca5e8b3..be474d82 100644 --- a/registry/client/auth/session.go +++ b/registry/client/auth/session.go @@ -10,10 +10,10 @@ import ( "sync" "time" - "github.com/Sirupsen/logrus" "github.com/docker/distribution/registry/client" "github.com/docker/distribution/registry/client/auth/challenge" "github.com/docker/distribution/registry/client/transport" + "github.com/sirupsen/logrus" ) var ( diff --git a/registry/handlers/app.go b/registry/handlers/app.go index 5b4c9f37..a5af88dc 100644 --- a/registry/handlers/app.go +++ b/registry/handlers/app.go @@ -14,7 +14,6 @@ import ( "strings" "time" - log "github.com/Sirupsen/logrus" "github.com/docker/distribution" "github.com/docker/distribution/configuration" ctxu "github.com/docker/distribution/context" @@ -38,6 +37,7 @@ import ( "github.com/docker/libtrust" "github.com/garyburd/redigo/redis" "github.com/gorilla/mux" + log "github.com/sirupsen/logrus" "golang.org/x/net/context" ) diff --git a/registry/handlers/hooks.go b/registry/handlers/hooks.go index 7bbab4f8..e51df2b7 100644 --- a/registry/handlers/hooks.go +++ b/registry/handlers/hooks.go @@ -7,7 +7,7 @@ import ( "strings" "text/template" - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" ) // logHook is for hooking Panic in web application diff --git a/registry/registry.go b/registry/registry.go index ee3d6b0b..fbb4dd87 100644 --- a/registry/registry.go +++ b/registry/registry.go @@ -11,7 +11,6 @@ import ( "rsc.io/letsencrypt" - log "github.com/Sirupsen/logrus" logstash "github.com/bshuster-repo/logrus-logstash-hook" "github.com/bugsnag/bugsnag-go" "github.com/docker/distribution/configuration" @@ -22,6 +21,7 @@ import ( "github.com/docker/distribution/uuid" "github.com/docker/distribution/version" gorhandlers "github.com/gorilla/handlers" + log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/yvasiyarov/gorelic" ) diff --git a/registry/storage/blobwriter.go b/registry/storage/blobwriter.go index d51e27ad..392b166c 100644 --- a/registry/storage/blobwriter.go +++ b/registry/storage/blobwriter.go @@ -7,11 +7,11 @@ import ( "path" "time" - "github.com/Sirupsen/logrus" "github.com/docker/distribution" "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" ) var ( diff --git a/registry/storage/blobwriter_resumable.go b/registry/storage/blobwriter_resumable.go index ff5482c3..cc1a833d 100644 --- a/registry/storage/blobwriter_resumable.go +++ b/registry/storage/blobwriter_resumable.go @@ -7,9 +7,9 @@ import ( "path" "strconv" - "github.com/Sirupsen/logrus" "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/sirupsen/logrus" "github.com/stevvooe/resumable" // 
register resumable hashes with import diff --git a/registry/storage/driver/gcs/gcs.go b/registry/storage/driver/gcs/gcs.go index 1369c280..8a0be4e9 100644 --- a/registry/storage/driver/gcs/gcs.go +++ b/registry/storage/driver/gcs/gcs.go @@ -37,7 +37,7 @@ import ( "google.golang.org/cloud" "google.golang.org/cloud/storage" - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" ctx "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" diff --git a/registry/storage/driver/oss/oss.go b/registry/storage/driver/oss/oss.go index 9797f42d..6c0c35a5 100644 --- a/registry/storage/driver/oss/oss.go +++ b/registry/storage/driver/oss/oss.go @@ -24,11 +24,11 @@ import ( "github.com/docker/distribution/context" - "github.com/Sirupsen/logrus" "github.com/denverdino/aliyungo/oss" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/base" "github.com/docker/distribution/registry/storage/driver/factory" + "github.com/sirupsen/logrus" ) const driverName = "oss" diff --git a/registry/storage/driver/s3-aws/s3_v2_signer.go b/registry/storage/driver/s3-aws/s3_v2_signer.go index cb801087..703660cf 100644 --- a/registry/storage/driver/s3-aws/s3_v2_signer.go +++ b/registry/storage/driver/s3-aws/s3_v2_signer.go @@ -32,11 +32,11 @@ import ( "strings" "time" - log "github.com/Sirupsen/logrus" "github.com/aws/aws-sdk-go/aws/corehandlers" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/service/s3" + log "github.com/sirupsen/logrus" ) const ( diff --git a/registry/storage/purgeuploads.go b/registry/storage/purgeuploads.go index 925b1ae9..d6cae4e7 100644 --- a/registry/storage/purgeuploads.go +++ b/registry/storage/purgeuploads.go @@ -5,10 +5,10 @@ import ( "strings" "time" - log "github.com/Sirupsen/logrus" "github.com/docker/distribution/context" storageDriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/uuid" + log "github.com/sirupsen/logrus" ) // uploadData stored the location of temporary files created during a layer upload diff --git a/vendor.conf b/vendor.conf index 2af46111..d67edd77 100644 --- a/vendor.conf +++ b/vendor.conf @@ -1,8 +1,8 @@ github.com/Azure/azure-sdk-for-go 088007b3b08cc02b27f2eadfdcd870958460ce7e github.com/Azure/go-autorest ec5f4903f77ed9927ac95b19ab8e44ada64c1356 -github.com/Sirupsen/logrus d26492970760ca5d33129d2d799e34be5c4782eb +github.com/sirupsen/logrus 3d4380f53a34dcdc95f0c1db702615992b38d9a4 github.com/aws/aws-sdk-go c6fc52983ea2375810aa38ddb5370e9cdf611716 -github.com/bshuster-repo/logrus-logstash-hook 5f729f2fb50a301153cae84ff5c58981d51c095a +github.com/bshuster-repo/logrus-logstash-hook d2c0ecc1836d91814e15e23bb5dc309c3ef51f4a github.com/bugsnag/bugsnag-go b1d153021fcd90ca3f080db36bec96dc690fb274 github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702 github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782 diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter.go b/vendor/github.com/Sirupsen/logrus/json_formatter.go deleted file mode 100644 index 2ad6dc5c..00000000 --- a/vendor/github.com/Sirupsen/logrus/json_formatter.go +++ /dev/null @@ -1,41 +0,0 @@ -package logrus - -import ( - "encoding/json" - "fmt" -) - -type JSONFormatter struct { - // TimestampFormat sets the format used for marshaling timestamps. 
- TimestampFormat string -} - -func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { - data := make(Fields, len(entry.Data)+3) - for k, v := range entry.Data { - switch v := v.(type) { - case error: - // Otherwise errors are ignored by `encoding/json` - // https://github.com/Sirupsen/logrus/issues/137 - data[k] = v.Error() - default: - data[k] = v - } - } - prefixFieldClashes(data) - - timestampFormat := f.TimestampFormat - if timestampFormat == "" { - timestampFormat = DefaultTimestampFormat - } - - data["time"] = entry.Time.Format(timestampFormat) - data["msg"] = entry.Message - data["level"] = entry.Level.String() - - serialized, err := json.Marshal(data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) - } - return append(serialized, '\n'), nil -} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_solaris.go b/vendor/github.com/Sirupsen/logrus/terminal_solaris.go deleted file mode 100644 index a3c6f6e7..00000000 --- a/vendor/github.com/Sirupsen/logrus/terminal_solaris.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build solaris,!appengine - -package logrus - -import ( - "os" - - "golang.org/x/sys/unix" -) - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal() bool { - _, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA) - return err == nil -} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_windows.go b/vendor/github.com/Sirupsen/logrus/terminal_windows.go deleted file mode 100644 index 3727e8ad..00000000 --- a/vendor/github.com/Sirupsen/logrus/terminal_windows.go +++ /dev/null @@ -1,27 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows,!appengine - -package logrus - -import ( - "syscall" - "unsafe" -) - -var kernel32 = syscall.NewLazyDLL("kernel32.dll") - -var ( - procGetConsoleMode = kernel32.NewProc("GetConsoleMode") -) - -// IsTerminal returns true if stderr's file descriptor is a terminal. 
-func IsTerminal() bool { - fd := syscall.Stderr - var st uint32 - r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) - return r != 0 && e == 0 -} diff --git a/vendor/github.com/bshuster-repo/logrus-logstash-hook/README.md b/vendor/github.com/bshuster-repo/logrus-logstash-hook/README.md index 0f281b50..9cc4378c 100644 --- a/vendor/github.com/bshuster-repo/logrus-logstash-hook/README.md +++ b/vendor/github.com/bshuster-repo/logrus-logstash-hook/README.md @@ -7,13 +7,13 @@ Use this hook to send the logs to [Logstash](https://www.elastic.co/products/log package main import ( - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" "github.com/bshuster-repo/logrus-logstash-hook" ) func main() { log := logrus.New() - hook, err := logrus_logstash.NewHook("tcp", "172.17.0.2:9999", "myappName") + hook, err := logrustash.NewHook("tcp", "172.17.0.2:9999", "myappName") if err != nil { log.Fatal(err) @@ -47,7 +47,7 @@ This can be done when creating the hook: ```go -hook, err := logrus_logstash.NewHookWithFields("tcp", "172.17.0.2:9999", "myappName", logrus.Fields{ +hook, err := logrustash.NewHookWithFields("tcp", "172.17.0.2:9999", "myappName", logrus.Fields{ "hostname": os.Hostname(), "serviceName": "myServiceName", }) @@ -83,7 +83,7 @@ For example if you don't want to see the hostname and serviceName on each log li ```go -hook, err := logrus_logstash.NewHookWithFields("tcp", "172.17.0.2:9999", "myappName", logrus.Fields{ +hook, err := logrustash.NewHookWithFields("tcp", "172.17.0.2:9999", "myappName", logrus.Fields{ "_hostname": os.Hostname(), "_serviceName": "myServiceName", }) @@ -93,3 +93,14 @@ hook.WithPrefix("_") There are also constructors available which allow you to specify the prefix from the start. The std-out will not have the '\_hostname' and '\_servicename' fields, and the logstash output will, but the prefix will be dropped from the name. + + +# Authors + +Name | Github | Twitter | +------------ | --------- | ---------- | +Boaz Shuster | ripcurld0 | @ripcurld0 | + +# License + +MIT. 
diff --git a/vendor/github.com/bshuster-repo/logrus-logstash-hook/logstash.go b/vendor/github.com/bshuster-repo/logrus-logstash-hook/logstash.go index baa61dc2..1f2e5a0c 100644 --- a/vendor/github.com/bshuster-repo/logrus-logstash-hook/logstash.go +++ b/vendor/github.com/bshuster-repo/logrus-logstash-hook/logstash.go @@ -1,10 +1,10 @@ -package logrus_logstash +package logrustash import ( "net" "strings" - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" ) // Hook represents a connection to a Logstash instance @@ -13,6 +13,7 @@ type Hook struct { appName string alwaysSentFields logrus.Fields hookOnlyPrefix string + TimeFormat string } // NewHook creates a new hook to a Logstash instance, which listens on @@ -106,6 +107,9 @@ func (h *Hook) Fire(entry *logrus.Entry) error { } formatter := LogstashFormatter{Type: h.appName} + if h.TimeFormat != "" { + formatter.TimestampFormat = h.TimeFormat + } dataBytes, err := formatter.FormatWithPrefix(entry, h.hookOnlyPrefix) if err != nil { diff --git a/vendor/github.com/bshuster-repo/logrus-logstash-hook/logstash_formatter.go b/vendor/github.com/bshuster-repo/logrus-logstash-hook/logstash_formatter.go index 7ae2cffc..e1d277b9 100644 --- a/vendor/github.com/bshuster-repo/logrus-logstash-hook/logstash_formatter.go +++ b/vendor/github.com/bshuster-repo/logrus-logstash-hook/logstash_formatter.go @@ -1,11 +1,11 @@ -package logrus_logstash +package logrustash import ( "encoding/json" "fmt" "strings" - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" ) // Formatter generates json in logstash format. @@ -44,8 +44,7 @@ func (f *LogstashFormatter) FormatWithPrefix(entry *logrus.Entry, prefix string) timeStampFormat := f.TimestampFormat if timeStampFormat == "" { - //timeStampFormat = logrus.DefaultTimestampFormat - timeStampFormat = "2006-01-02 15:04:05.000" + timeStampFormat = logrus.DefaultTimestampFormat } fields["@timestamp"] = entry.Time.Format(timeStampFormat) diff --git a/vendor/github.com/Sirupsen/logrus/LICENSE b/vendor/github.com/sirupsen/logrus/LICENSE similarity index 100% rename from vendor/github.com/Sirupsen/logrus/LICENSE rename to vendor/github.com/sirupsen/logrus/LICENSE diff --git a/vendor/github.com/Sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md similarity index 73% rename from vendor/github.com/Sirupsen/logrus/README.md rename to vendor/github.com/sirupsen/logrus/README.md index 126cd1fc..3f324e21 100644 --- a/vendor/github.com/Sirupsen/logrus/README.md +++ b/vendor/github.com/sirupsen/logrus/README.md @@ -1,11 +1,18 @@ -# Logrus :walrus: [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/Sirupsen/logrus?status.svg)](https://godoc.org/github.com/Sirupsen/logrus) +# Logrus :walrus: [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus) Logrus is a structured logger for Go (golang), completely API compatible with -the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not -yet stable (pre 1.0). Logrus itself is completely stable and has been used in -many large deployments. The core API is unlikely to change much but please -version control your Logrus to make sure you aren't fetching latest `master` on -every build.** +the standard library logger. [Godoc][godoc]. 
+ +**Seeing weird case-sensitive problems?** Unfortunately, the author failed to +realize the consequences of renaming to lower-case. Due to the Go package +environment, this caused issues. Regretfully, there's no turning back now. +Everything using `logrus` will need to use the lower-case: +`github.com/sirupsen/logrus`. Any package that isn't, should be changed. + +I am terribly sorry for this inconvenience. Logrus strives hard for backwards +compatibility, and the author failed to realize the cascading consequences of +such a name-change. To fix Glide, see [these +comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437). Nicely color-coded in development (when a TTY is attached, otherwise just plain text): @@ -46,6 +53,12 @@ time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x20822 exit status 1 ``` +#### Case-sensitivity + +The organization's name was changed to lower-case--and this will not be changed +back. If you are getting import conflicts due to case sensitivity, please use +the lower-case import: `github.com/sirupsen/logrus`. + #### Example The simplest way to use Logrus is simply the package-level exported logger: @@ -54,7 +67,7 @@ The simplest way to use Logrus is simply the package-level exported logger: package main import ( - log "github.com/Sirupsen/logrus" + log "github.com/sirupsen/logrus" ) func main() { @@ -65,7 +78,7 @@ func main() { ``` Note that it's completely api-compatible with the stdlib logger, so you can -replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"` +replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"` and you'll now have the flexibility of Logrus. You can customize it all you want: @@ -74,15 +87,16 @@ package main import ( "os" - log "github.com/Sirupsen/logrus" + log "github.com/sirupsen/logrus" ) func init() { // Log as JSON instead of the default ASCII formatter. log.SetFormatter(&log.JSONFormatter{}) - // Output to stderr instead of stdout, could also be a file. - log.SetOutput(os.Stderr) + // Output to stdout instead of the default stderr + // Can be any io.Writer, see below for File example + log.SetOutput(os.Stdout) // Only log the warning severity or above. log.SetLevel(log.WarnLevel) @@ -123,7 +137,8 @@ application, you can also create an instance of the `logrus` Logger: package main import ( - "github.com/Sirupsen/logrus" + "os" + "github.com/sirupsen/logrus" ) // Create a new instance of the logger. You can have any number of instances. @@ -132,7 +147,15 @@ var log = logrus.New() func main() { // The API for setting attributes is a little different than the package level // exported logger. See Godoc. - log.Out = os.Stderr + log.Out = os.Stdout + + // You could set this to any `io.Writer` such as a file + // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666) + // if err == nil { + // log.Out = file + // } else { + // log.Info("Failed to log to file, using default stderr") + // } log.WithFields(logrus.Fields{ "animal": "walrus", @@ -143,7 +166,7 @@ func main() { #### Fields -Logrus encourages careful, structured logging though logging fields instead of +Logrus encourages careful, structured logging through logging fields instead of long, unparseable error messages. 
For example, instead of: `log.Fatalf("Failed to send event %s to topic %s with key %d")`, you should log the much more discoverable: @@ -165,6 +188,20 @@ In general, with Logrus using any of the `printf`-family functions should be seen as a hint you should add a field, however, you can still use the `printf`-family functions with Logrus. +#### Default Fields + +Often it's helpful to have fields _always_ attached to log statements in an +application or parts of one. For example, you may want to always log the +`request_id` and `user_ip` in the context of a request. Instead of writing +`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on +every line, you can create a `logrus.Entry` to pass around instead: + +```go +requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip}) +requestLogger.Info("something happened on that request") # will log request_id and user_ip +requestLogger.Warn("something not great happened") +``` + #### Hooks You can add hooks for logging levels. For example to send errors to an exception @@ -176,9 +213,9 @@ Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in ```go import ( - log "github.com/Sirupsen/logrus" + log "github.com/sirupsen/logrus" "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake" - logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog" + logrus_syslog "github.com/sirupsen/logrus/hooks/syslog" "log/syslog" ) @@ -200,40 +237,51 @@ Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/v | Hook | Description | | ----- | ----------- | -| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. | | [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. | -| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. | -| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. | -| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. | -| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. | -| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. | -| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | -| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. 
| -| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | -| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) | -| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) | -| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem | -| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger | -| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail | -| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar | -| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd | -| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb | -| [Influxus] (http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB] (http://influxdata.com/) | -| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb | -| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit | -| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic | -| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) | +| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. | +| [Amazon Kinesis](https://github.com/evalphobia/logrus_kinesis) | Hook for logging to [Amazon Kinesis](https://aws.amazon.com/kinesis/) | | [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) | -| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka | -| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) | +| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. 
| +| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic | +| [Discordrus](https://github.com/kz/discordrus) | Hook for logging to [Discord](https://discordapp.com/) | | [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch| -| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)| -| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)| -| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) | -| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash | +| [Firehose](https://github.com/beaubrewer/logrus_firehose) | Hook for logging to [Amazon Firehose](https://aws.amazon.com/kinesis/firehose/) +| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd | +| [Go-Slack](https://github.com/multiplay/go-slack) | Hook for logging to [Slack](https://slack.com) | +| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) | +| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. | +| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger | +| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb | +| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) | +| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | +| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka | +| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem | +| [Logentries](https://github.com/jcftang/logentriesrus) | Hook for logging to [Logentries](https://logentries.com/) | +| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) | | [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) | +| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | +| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) | +| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail | +| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb | +| [NATS-Hook](https://github.com/rybit/nats_logrus_hook) | Hook for logging to [NATS](https://nats.io) | +| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit | +| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. 
| +| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) | | [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) | - +| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) | +| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) | +| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar | +| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)| +| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. | +| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. | +| [Stackdriver](https://github.com/knq/sdhook) | Hook for logging to [Google Stackdriver](https://cloud.google.com/logging/) | +| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)| +| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. | +| [Syslog TLS](https://github.com/shinji62/logrus-syslog-ng) | Send errors to remote syslog server with TLS support. | +| [TraceView](https://github.com/evalphobia/logrus_appneta) | Hook for logging to [AppNeta TraceView](https://www.appneta.com/products/traceview/) | +| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) | +| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash | +| [SQS-Hook](https://github.com/tsarpaul/logrus_sqs) | Hook for logging to [Amazon Simple Queue Service (SQS)](https://aws.amazon.com/sqs/) | #### Level logging @@ -282,7 +330,7 @@ could do: ```go import ( - log "github.com/Sirupsen/logrus" + log "github.com/sirupsen/logrus" ) init() { @@ -309,8 +357,11 @@ The built-in logging formatters are: without colors. * *Note:* to force colored output when there is no TTY, set the `ForceColors` field to `true`. To force no colored output even if there is a TTY set the - `DisableColors` field to `true` + `DisableColors` field to `true`. For Windows, see + [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable). + * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter). * `logrus.JSONFormatter`. Logs fields as JSON. + * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter). Third party logging formatters: @@ -359,6 +410,18 @@ srv := http.Server{ Each line written to that writer will be printed the usual way, using formatters and hooks. The level for those entries is `info`. +This means that we can override the standard library logger easily: + +```go +logger := logrus.New() +logger.Formatter = &logrus.JSONFormatter{} + +// Use logrus for standard log output +// Note that `log` here references stdlib's log +// Not logrus imported under the name `log`. +log.SetOutput(logger.Writer()) +``` + #### Rotation Log rotation is not provided with Logrus. Log rotation should be done by an @@ -370,7 +433,7 @@ entries. It should not be a feature of the application-level logger. 
| Tool | Description | | ---- | ----------- | |[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.| -|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper arround Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) | +|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper around Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) | #### Testing @@ -380,15 +443,24 @@ Logrus has a built in facility for asserting the presence of log messages. This * a test logger (`test.NewNullLogger`) that just records log messages (and does not output any): ```go -logger, hook := NewNullLogger() -logger.Error("Hello error") +import( + "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus/hooks/null" + "github.com/stretchr/testify/assert" + "testing" +) -assert.Equal(1, len(hook.Entries)) -assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level) -assert.Equal("Hello error", hook.LastEntry().Message) +func TestSomething(t*testing.T){ + logger, hook := null.NewNullLogger() + logger.Error("Helloerror") -hook.Reset() -assert.Nil(hook.LastEntry()) + assert.Equal(t, 1, len(hook.Entries)) + assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level) + assert.Equal(t, "Helloerror", hook.LastEntry().Message) + + hook.Reset() + assert.Nil(t, hook.LastEntry()) +} ``` #### Fatal handlers @@ -407,7 +479,7 @@ logrus.RegisterExitHandler(handler) ... ``` -#### Thread safty +#### Thread safety By default Logger is protected by mutex for concurrent writes, this mutex is invoked when calling hooks and writing logs. If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking. diff --git a/vendor/github.com/Sirupsen/logrus/alt_exit.go b/vendor/github.com/sirupsen/logrus/alt_exit.go similarity index 96% rename from vendor/github.com/Sirupsen/logrus/alt_exit.go rename to vendor/github.com/sirupsen/logrus/alt_exit.go index b4c9e847..8af90637 100644 --- a/vendor/github.com/Sirupsen/logrus/alt_exit.go +++ b/vendor/github.com/sirupsen/logrus/alt_exit.go @@ -1,7 +1,7 @@ package logrus // The following code was sourced and modified from the -// https://bitbucket.org/tebeka/atexit package governed by the following license: +// https://github.com/tebeka/atexit package governed by the following license: // // Copyright (c) 2012 Miki Tebeka . 
// diff --git a/vendor/github.com/Sirupsen/logrus/doc.go b/vendor/github.com/sirupsen/logrus/doc.go similarity index 83% rename from vendor/github.com/Sirupsen/logrus/doc.go rename to vendor/github.com/sirupsen/logrus/doc.go index dddd5f87..da67aba0 100644 --- a/vendor/github.com/Sirupsen/logrus/doc.go +++ b/vendor/github.com/sirupsen/logrus/doc.go @@ -7,7 +7,7 @@ The simplest way to use Logrus is simply the package-level exported logger: package main import ( - log "github.com/Sirupsen/logrus" + log "github.com/sirupsen/logrus" ) func main() { @@ -21,6 +21,6 @@ The simplest way to use Logrus is simply the package-level exported logger: Output: time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10 -For a full guide visit https://github.com/Sirupsen/logrus +For a full guide visit https://github.com/sirupsen/logrus */ package logrus diff --git a/vendor/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go similarity index 89% rename from vendor/github.com/Sirupsen/logrus/entry.go rename to vendor/github.com/sirupsen/logrus/entry.go index 4edbe7a2..320e5d5b 100644 --- a/vendor/github.com/Sirupsen/logrus/entry.go +++ b/vendor/github.com/sirupsen/logrus/entry.go @@ -126,7 +126,7 @@ func (entry Entry) log(level Level, msg string) { } func (entry *Entry) Debug(args ...interface{}) { - if entry.Logger.Level >= DebugLevel { + if entry.Logger.level() >= DebugLevel { entry.log(DebugLevel, fmt.Sprint(args...)) } } @@ -136,13 +136,13 @@ func (entry *Entry) Print(args ...interface{}) { } func (entry *Entry) Info(args ...interface{}) { - if entry.Logger.Level >= InfoLevel { + if entry.Logger.level() >= InfoLevel { entry.log(InfoLevel, fmt.Sprint(args...)) } } func (entry *Entry) Warn(args ...interface{}) { - if entry.Logger.Level >= WarnLevel { + if entry.Logger.level() >= WarnLevel { entry.log(WarnLevel, fmt.Sprint(args...)) } } @@ -152,20 +152,20 @@ func (entry *Entry) Warning(args ...interface{}) { } func (entry *Entry) Error(args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { + if entry.Logger.level() >= ErrorLevel { entry.log(ErrorLevel, fmt.Sprint(args...)) } } func (entry *Entry) Fatal(args ...interface{}) { - if entry.Logger.Level >= FatalLevel { + if entry.Logger.level() >= FatalLevel { entry.log(FatalLevel, fmt.Sprint(args...)) } Exit(1) } func (entry *Entry) Panic(args ...interface{}) { - if entry.Logger.Level >= PanicLevel { + if entry.Logger.level() >= PanicLevel { entry.log(PanicLevel, fmt.Sprint(args...)) } panic(fmt.Sprint(args...)) @@ -174,13 +174,13 @@ func (entry *Entry) Panic(args ...interface{}) { // Entry Printf family functions func (entry *Entry) Debugf(format string, args ...interface{}) { - if entry.Logger.Level >= DebugLevel { + if entry.Logger.level() >= DebugLevel { entry.Debug(fmt.Sprintf(format, args...)) } } func (entry *Entry) Infof(format string, args ...interface{}) { - if entry.Logger.Level >= InfoLevel { + if entry.Logger.level() >= InfoLevel { entry.Info(fmt.Sprintf(format, args...)) } } @@ -190,7 +190,7 @@ func (entry *Entry) Printf(format string, args ...interface{}) { } func (entry *Entry) Warnf(format string, args ...interface{}) { - if entry.Logger.Level >= WarnLevel { + if entry.Logger.level() >= WarnLevel { entry.Warn(fmt.Sprintf(format, args...)) } } @@ -200,20 +200,20 @@ func (entry *Entry) Warningf(format string, args ...interface{}) { } func (entry *Entry) Errorf(format string, args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { + if entry.Logger.level() >= ErrorLevel { 
entry.Error(fmt.Sprintf(format, args...)) } } func (entry *Entry) Fatalf(format string, args ...interface{}) { - if entry.Logger.Level >= FatalLevel { + if entry.Logger.level() >= FatalLevel { entry.Fatal(fmt.Sprintf(format, args...)) } Exit(1) } func (entry *Entry) Panicf(format string, args ...interface{}) { - if entry.Logger.Level >= PanicLevel { + if entry.Logger.level() >= PanicLevel { entry.Panic(fmt.Sprintf(format, args...)) } } @@ -221,13 +221,13 @@ func (entry *Entry) Panicf(format string, args ...interface{}) { // Entry Println family functions func (entry *Entry) Debugln(args ...interface{}) { - if entry.Logger.Level >= DebugLevel { + if entry.Logger.level() >= DebugLevel { entry.Debug(entry.sprintlnn(args...)) } } func (entry *Entry) Infoln(args ...interface{}) { - if entry.Logger.Level >= InfoLevel { + if entry.Logger.level() >= InfoLevel { entry.Info(entry.sprintlnn(args...)) } } @@ -237,7 +237,7 @@ func (entry *Entry) Println(args ...interface{}) { } func (entry *Entry) Warnln(args ...interface{}) { - if entry.Logger.Level >= WarnLevel { + if entry.Logger.level() >= WarnLevel { entry.Warn(entry.sprintlnn(args...)) } } @@ -247,20 +247,20 @@ func (entry *Entry) Warningln(args ...interface{}) { } func (entry *Entry) Errorln(args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { + if entry.Logger.level() >= ErrorLevel { entry.Error(entry.sprintlnn(args...)) } } func (entry *Entry) Fatalln(args ...interface{}) { - if entry.Logger.Level >= FatalLevel { + if entry.Logger.level() >= FatalLevel { entry.Fatal(entry.sprintlnn(args...)) } Exit(1) } func (entry *Entry) Panicln(args ...interface{}) { - if entry.Logger.Level >= PanicLevel { + if entry.Logger.level() >= PanicLevel { entry.Panic(entry.sprintlnn(args...)) } } diff --git a/vendor/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go similarity index 99% rename from vendor/github.com/Sirupsen/logrus/exported.go rename to vendor/github.com/sirupsen/logrus/exported.go index 9a0120ac..1aeaa90b 100644 --- a/vendor/github.com/Sirupsen/logrus/exported.go +++ b/vendor/github.com/sirupsen/logrus/exported.go @@ -31,14 +31,14 @@ func SetFormatter(formatter Formatter) { func SetLevel(level Level) { std.mu.Lock() defer std.mu.Unlock() - std.Level = level + std.setLevel(level) } // GetLevel returns the standard logger level. func GetLevel() Level { std.mu.Lock() defer std.mu.Unlock() - return std.Level + return std.level() } // AddHook adds a hook to the standard logger hooks. 
diff --git a/vendor/github.com/Sirupsen/logrus/formatter.go b/vendor/github.com/sirupsen/logrus/formatter.go similarity index 100% rename from vendor/github.com/Sirupsen/logrus/formatter.go rename to vendor/github.com/sirupsen/logrus/formatter.go diff --git a/vendor/github.com/Sirupsen/logrus/hooks.go b/vendor/github.com/sirupsen/logrus/hooks.go similarity index 100% rename from vendor/github.com/Sirupsen/logrus/hooks.go rename to vendor/github.com/sirupsen/logrus/hooks.go diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go new file mode 100644 index 00000000..e787ea17 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/json_formatter.go @@ -0,0 +1,74 @@ +package logrus + +import ( + "encoding/json" + "fmt" +) + +type fieldKey string +type FieldMap map[fieldKey]string + +const ( + FieldKeyMsg = "msg" + FieldKeyLevel = "level" + FieldKeyTime = "time" +) + +func (f FieldMap) resolve(key fieldKey) string { + if k, ok := f[key]; ok { + return k + } + + return string(key) +} + +type JSONFormatter struct { + // TimestampFormat sets the format used for marshaling timestamps. + TimestampFormat string + + // DisableTimestamp allows disabling automatic timestamps in output + DisableTimestamp bool + + // FieldMap allows users to customize the names of keys for various fields. + // As an example: + // formatter := &JSONFormatter{ + // FieldMap: FieldMap{ + // FieldKeyTime: "@timestamp", + // FieldKeyLevel: "@level", + // FieldKeyMsg: "@message", + // }, + // } + FieldMap FieldMap +} + +func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { + data := make(Fields, len(entry.Data)+3) + for k, v := range entry.Data { + switch v := v.(type) { + case error: + // Otherwise errors are ignored by `encoding/json` + // https://github.com/sirupsen/logrus/issues/137 + data[k] = v.Error() + default: + data[k] = v + } + } + prefixFieldClashes(data) + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = DefaultTimestampFormat + } + + if !f.DisableTimestamp { + data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat) + } + data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message + data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String() + + serialized, err := json.Marshal(data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + } + return append(serialized, '\n'), nil +} diff --git a/vendor/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go similarity index 88% rename from vendor/github.com/Sirupsen/logrus/logger.go rename to vendor/github.com/sirupsen/logrus/logger.go index b769f3d3..370fff5d 100644 --- a/vendor/github.com/Sirupsen/logrus/logger.go +++ b/vendor/github.com/sirupsen/logrus/logger.go @@ -4,6 +4,7 @@ import ( "io" "os" "sync" + "sync/atomic" ) type Logger struct { @@ -112,7 +113,7 @@ func (logger *Logger) WithError(err error) *Entry { } func (logger *Logger) Debugf(format string, args ...interface{}) { - if logger.Level >= DebugLevel { + if logger.level() >= DebugLevel { entry := logger.newEntry() entry.Debugf(format, args...) logger.releaseEntry(entry) @@ -120,7 +121,7 @@ func (logger *Logger) Debugf(format string, args ...interface{}) { } func (logger *Logger) Infof(format string, args ...interface{}) { - if logger.Level >= InfoLevel { + if logger.level() >= InfoLevel { entry := logger.newEntry() entry.Infof(format, args...) 
logger.releaseEntry(entry) @@ -134,7 +135,7 @@ func (logger *Logger) Printf(format string, args ...interface{}) { } func (logger *Logger) Warnf(format string, args ...interface{}) { - if logger.Level >= WarnLevel { + if logger.level() >= WarnLevel { entry := logger.newEntry() entry.Warnf(format, args...) logger.releaseEntry(entry) @@ -142,7 +143,7 @@ func (logger *Logger) Warnf(format string, args ...interface{}) { } func (logger *Logger) Warningf(format string, args ...interface{}) { - if logger.Level >= WarnLevel { + if logger.level() >= WarnLevel { entry := logger.newEntry() entry.Warnf(format, args...) logger.releaseEntry(entry) @@ -150,7 +151,7 @@ func (logger *Logger) Warningf(format string, args ...interface{}) { } func (logger *Logger) Errorf(format string, args ...interface{}) { - if logger.Level >= ErrorLevel { + if logger.level() >= ErrorLevel { entry := logger.newEntry() entry.Errorf(format, args...) logger.releaseEntry(entry) @@ -158,7 +159,7 @@ func (logger *Logger) Errorf(format string, args ...interface{}) { } func (logger *Logger) Fatalf(format string, args ...interface{}) { - if logger.Level >= FatalLevel { + if logger.level() >= FatalLevel { entry := logger.newEntry() entry.Fatalf(format, args...) logger.releaseEntry(entry) @@ -167,7 +168,7 @@ func (logger *Logger) Fatalf(format string, args ...interface{}) { } func (logger *Logger) Panicf(format string, args ...interface{}) { - if logger.Level >= PanicLevel { + if logger.level() >= PanicLevel { entry := logger.newEntry() entry.Panicf(format, args...) logger.releaseEntry(entry) @@ -175,7 +176,7 @@ func (logger *Logger) Panicf(format string, args ...interface{}) { } func (logger *Logger) Debug(args ...interface{}) { - if logger.Level >= DebugLevel { + if logger.level() >= DebugLevel { entry := logger.newEntry() entry.Debug(args...) logger.releaseEntry(entry) @@ -183,7 +184,7 @@ func (logger *Logger) Debug(args ...interface{}) { } func (logger *Logger) Info(args ...interface{}) { - if logger.Level >= InfoLevel { + if logger.level() >= InfoLevel { entry := logger.newEntry() entry.Info(args...) logger.releaseEntry(entry) @@ -197,7 +198,7 @@ func (logger *Logger) Print(args ...interface{}) { } func (logger *Logger) Warn(args ...interface{}) { - if logger.Level >= WarnLevel { + if logger.level() >= WarnLevel { entry := logger.newEntry() entry.Warn(args...) logger.releaseEntry(entry) @@ -205,7 +206,7 @@ func (logger *Logger) Warn(args ...interface{}) { } func (logger *Logger) Warning(args ...interface{}) { - if logger.Level >= WarnLevel { + if logger.level() >= WarnLevel { entry := logger.newEntry() entry.Warn(args...) logger.releaseEntry(entry) @@ -213,7 +214,7 @@ func (logger *Logger) Warning(args ...interface{}) { } func (logger *Logger) Error(args ...interface{}) { - if logger.Level >= ErrorLevel { + if logger.level() >= ErrorLevel { entry := logger.newEntry() entry.Error(args...) logger.releaseEntry(entry) @@ -221,7 +222,7 @@ func (logger *Logger) Error(args ...interface{}) { } func (logger *Logger) Fatal(args ...interface{}) { - if logger.Level >= FatalLevel { + if logger.level() >= FatalLevel { entry := logger.newEntry() entry.Fatal(args...) logger.releaseEntry(entry) @@ -230,7 +231,7 @@ func (logger *Logger) Fatal(args ...interface{}) { } func (logger *Logger) Panic(args ...interface{}) { - if logger.Level >= PanicLevel { + if logger.level() >= PanicLevel { entry := logger.newEntry() entry.Panic(args...) 
logger.releaseEntry(entry) @@ -238,7 +239,7 @@ func (logger *Logger) Panic(args ...interface{}) { } func (logger *Logger) Debugln(args ...interface{}) { - if logger.Level >= DebugLevel { + if logger.level() >= DebugLevel { entry := logger.newEntry() entry.Debugln(args...) logger.releaseEntry(entry) @@ -246,7 +247,7 @@ func (logger *Logger) Debugln(args ...interface{}) { } func (logger *Logger) Infoln(args ...interface{}) { - if logger.Level >= InfoLevel { + if logger.level() >= InfoLevel { entry := logger.newEntry() entry.Infoln(args...) logger.releaseEntry(entry) @@ -260,7 +261,7 @@ func (logger *Logger) Println(args ...interface{}) { } func (logger *Logger) Warnln(args ...interface{}) { - if logger.Level >= WarnLevel { + if logger.level() >= WarnLevel { entry := logger.newEntry() entry.Warnln(args...) logger.releaseEntry(entry) @@ -268,7 +269,7 @@ func (logger *Logger) Warnln(args ...interface{}) { } func (logger *Logger) Warningln(args ...interface{}) { - if logger.Level >= WarnLevel { + if logger.level() >= WarnLevel { entry := logger.newEntry() entry.Warnln(args...) logger.releaseEntry(entry) @@ -276,7 +277,7 @@ func (logger *Logger) Warningln(args ...interface{}) { } func (logger *Logger) Errorln(args ...interface{}) { - if logger.Level >= ErrorLevel { + if logger.level() >= ErrorLevel { entry := logger.newEntry() entry.Errorln(args...) logger.releaseEntry(entry) @@ -284,7 +285,7 @@ func (logger *Logger) Errorln(args ...interface{}) { } func (logger *Logger) Fatalln(args ...interface{}) { - if logger.Level >= FatalLevel { + if logger.level() >= FatalLevel { entry := logger.newEntry() entry.Fatalln(args...) logger.releaseEntry(entry) @@ -293,7 +294,7 @@ func (logger *Logger) Fatalln(args ...interface{}) { } func (logger *Logger) Panicln(args ...interface{}) { - if logger.Level >= PanicLevel { + if logger.level() >= PanicLevel { entry := logger.newEntry() entry.Panicln(args...) logger.releaseEntry(entry) @@ -306,3 +307,11 @@ func (logger *Logger) Panicln(args ...interface{}) { func (logger *Logger) SetNoLock() { logger.mu.Disable() } + +func (logger *Logger) level() Level { + return Level(atomic.LoadUint32((*uint32)(&logger.Level))) +} + +func (logger *Logger) setLevel(level Level) { + atomic.StoreUint32((*uint32)(&logger.Level), uint32(level)) +} diff --git a/vendor/github.com/Sirupsen/logrus/logrus.go b/vendor/github.com/sirupsen/logrus/logrus.go similarity index 99% rename from vendor/github.com/Sirupsen/logrus/logrus.go rename to vendor/github.com/sirupsen/logrus/logrus.go index e5966911..dd389997 100644 --- a/vendor/github.com/Sirupsen/logrus/logrus.go +++ b/vendor/github.com/sirupsen/logrus/logrus.go @@ -10,7 +10,7 @@ import ( type Fields map[string]interface{} // Level type -type Level uint8 +type Level uint32 // Convert the Level to a string. E.g. PanicLevel becomes "panic". func (level Level) String() string { diff --git a/vendor/github.com/Sirupsen/logrus/terminal_appengine.go b/vendor/github.com/sirupsen/logrus/terminal_appengine.go similarity index 71% rename from vendor/github.com/Sirupsen/logrus/terminal_appengine.go rename to vendor/github.com/sirupsen/logrus/terminal_appengine.go index 1960169e..e011a869 100644 --- a/vendor/github.com/Sirupsen/logrus/terminal_appengine.go +++ b/vendor/github.com/sirupsen/logrus/terminal_appengine.go @@ -2,7 +2,9 @@ package logrus +import "io" + // IsTerminal returns true if stderr's file descriptor is a terminal. 
-func IsTerminal() bool { +func IsTerminal(f io.Writer) bool { return true } diff --git a/vendor/github.com/Sirupsen/logrus/terminal_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_bsd.go similarity index 100% rename from vendor/github.com/Sirupsen/logrus/terminal_bsd.go rename to vendor/github.com/sirupsen/logrus/terminal_bsd.go diff --git a/vendor/github.com/Sirupsen/logrus/terminal_linux.go b/vendor/github.com/sirupsen/logrus/terminal_linux.go similarity index 100% rename from vendor/github.com/Sirupsen/logrus/terminal_linux.go rename to vendor/github.com/sirupsen/logrus/terminal_linux.go diff --git a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/sirupsen/logrus/terminal_notwindows.go similarity index 60% rename from vendor/github.com/Sirupsen/logrus/terminal_notwindows.go rename to vendor/github.com/sirupsen/logrus/terminal_notwindows.go index 329038f6..190297ab 100644 --- a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go +++ b/vendor/github.com/sirupsen/logrus/terminal_notwindows.go @@ -9,14 +9,20 @@ package logrus import ( + "io" + "os" "syscall" "unsafe" ) // IsTerminal returns true if stderr's file descriptor is a terminal. -func IsTerminal() bool { - fd := syscall.Stderr +func IsTerminal(f io.Writer) bool { var termios Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 + switch v := f.(type) { + case *os.File: + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(v.Fd()), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 + default: + return false + } } diff --git a/vendor/github.com/sirupsen/logrus/terminal_solaris.go b/vendor/github.com/sirupsen/logrus/terminal_solaris.go new file mode 100644 index 00000000..3c86b1ab --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_solaris.go @@ -0,0 +1,21 @@ +// +build solaris,!appengine + +package logrus + +import ( + "io" + "os" + + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(f io.Writer) bool { + switch v := f.(type) { + case *os.File: + _, err := unix.IoctlGetTermios(int(v.Fd()), unix.TCGETA) + return err == nil + default: + return false + } +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_windows.go b/vendor/github.com/sirupsen/logrus/terminal_windows.go new file mode 100644 index 00000000..7a336307 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_windows.go @@ -0,0 +1,82 @@ +// Based on ssh/terminal: +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build windows,!appengine + +package logrus + +import ( + "bytes" + "errors" + "io" + "os" + "os/exec" + "strconv" + "strings" + "syscall" + "unsafe" +) + +var kernel32 = syscall.NewLazyDLL("kernel32.dll") + +var ( + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procSetConsoleMode = kernel32.NewProc("SetConsoleMode") +) + +const ( + enableProcessedOutput = 0x0001 + enableWrapAtEolOutput = 0x0002 + enableVirtualTerminalProcessing = 0x0004 +) + +func getVersion() (float64, error) { + stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{} + cmd := exec.Command("cmd", "ver") + cmd.Stdout = stdout + cmd.Stderr = stderr + err := cmd.Run() + if err != nil { + return -1, err + } + + // The output should be like "Microsoft Windows [Version XX.X.XXXXXX]" + version := strings.Replace(stdout.String(), "\n", "", -1) + version = strings.Replace(version, "\r\n", "", -1) + + x1 := strings.Index(version, "[Version") + + if x1 == -1 || strings.Index(version, "]") == -1 { + return -1, errors.New("Can't determine Windows version") + } + + return strconv.ParseFloat(version[x1+9:x1+13], 64) +} + +func init() { + ver, err := getVersion() + if err != nil { + return + } + + // Activate Virtual Processing for Windows CMD + // Info: https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx + if ver >= 10 { + handle := syscall.Handle(os.Stderr.Fd()) + procSetConsoleMode.Call(uintptr(handle), enableProcessedOutput|enableWrapAtEolOutput|enableVirtualTerminalProcessing) + } +} + +// IsTerminal returns true if stderr's file descriptor is a terminal. +func IsTerminal(f io.Writer) bool { + switch v := f.(type) { + case *os.File: + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(v.Fd()), uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 + default: + return false + } +} diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go similarity index 73% rename from vendor/github.com/Sirupsen/logrus/text_formatter.go rename to vendor/github.com/sirupsen/logrus/text_formatter.go index 9114b3ca..ba888540 100644 --- a/vendor/github.com/Sirupsen/logrus/text_formatter.go +++ b/vendor/github.com/sirupsen/logrus/text_formatter.go @@ -3,9 +3,9 @@ package logrus import ( "bytes" "fmt" - "runtime" "sort" "strings" + "sync" "time" ) @@ -20,16 +20,10 @@ const ( var ( baseTimestamp time.Time - isTerminal bool ) func init() { baseTimestamp = time.Now() - isTerminal = IsTerminal() -} - -func miniTS() int { - return int(time.Since(baseTimestamp) / time.Second) } type TextFormatter struct { @@ -54,11 +48,32 @@ type TextFormatter struct { // that log extremely frequently and don't use the JSON formatter this may not // be desired. DisableSorting bool + + // QuoteEmptyFields will wrap empty fields in quotes if true + QuoteEmptyFields bool + + // QuoteCharacter can be set to the override the default quoting character " + // with something else. For example: ', or `. 
+ QuoteCharacter string + + // Whether the logger's out is to a terminal + isTerminal bool + + sync.Once +} + +func (f *TextFormatter) init(entry *Entry) { + if len(f.QuoteCharacter) == 0 { + f.QuoteCharacter = "\"" + } + if entry.Logger != nil { + f.isTerminal = IsTerminal(entry.Logger.Out) + } } func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { var b *bytes.Buffer - var keys []string = make([]string, 0, len(entry.Data)) + keys := make([]string, 0, len(entry.Data)) for k := range entry.Data { keys = append(keys, k) } @@ -74,8 +89,9 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { prefixFieldClashes(entry.Data) - isColorTerminal := isTerminal && (runtime.GOOS != "windows") - isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors + f.Do(func() { f.init(entry) }) + + isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors timestampFormat := f.TimestampFormat if timestampFormat == "" { @@ -115,8 +131,10 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin levelText := strings.ToUpper(entry.Level.String())[0:4] - if !f.FullTimestamp { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message) + if f.DisableTimestamp { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message) + } else if !f.FullTimestamp { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message) } else { fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message) } @@ -127,7 +145,10 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin } } -func needsQuoting(text string) bool { +func (f *TextFormatter) needsQuoting(text string) bool { + if f.QuoteEmptyFields && len(text) == 0 { + return true + } for _, ch := range text { if !((ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') || @@ -150,17 +171,17 @@ func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interf func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) { switch value := value.(type) { case string: - if !needsQuoting(value) { + if !f.needsQuoting(value) { b.WriteString(value) } else { - fmt.Fprintf(b, "%q", value) + fmt.Fprintf(b, "%s%v%s", f.QuoteCharacter, value, f.QuoteCharacter) } case error: errmsg := value.Error() - if !needsQuoting(errmsg) { + if !f.needsQuoting(errmsg) { b.WriteString(errmsg) } else { - fmt.Fprintf(b, "%q", errmsg) + fmt.Fprintf(b, "%s%v%s", f.QuoteCharacter, errmsg, f.QuoteCharacter) } default: fmt.Fprint(b, value) diff --git a/vendor/github.com/Sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go similarity index 54% rename from vendor/github.com/Sirupsen/logrus/writer.go rename to vendor/github.com/sirupsen/logrus/writer.go index f74d2aa5..7bdebedc 100644 --- a/vendor/github.com/Sirupsen/logrus/writer.go +++ b/vendor/github.com/sirupsen/logrus/writer.go @@ -11,39 +11,48 @@ func (logger *Logger) Writer() *io.PipeWriter { } func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { + return NewEntry(logger).WriterLevel(level) +} + +func (entry *Entry) Writer() *io.PipeWriter { + return entry.WriterLevel(InfoLevel) +} + +func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { reader, writer := io.Pipe() var printFunc func(args ...interface{}) + switch level { case DebugLevel: - printFunc = logger.Debug + printFunc = entry.Debug case InfoLevel: - printFunc = 
logger.Info
+		printFunc = entry.Info
 	case WarnLevel:
-		printFunc = logger.Warn
+		printFunc = entry.Warn
 	case ErrorLevel:
-		printFunc = logger.Error
+		printFunc = entry.Error
 	case FatalLevel:
-		printFunc = logger.Fatal
+		printFunc = entry.Fatal
 	case PanicLevel:
-		printFunc = logger.Panic
+		printFunc = entry.Panic
 	default:
-		printFunc = logger.Print
+		printFunc = entry.Print
 	}
 
-	go logger.writerScanner(reader, printFunc)
+	go entry.writerScanner(reader, printFunc)
 	runtime.SetFinalizer(writer, writerFinalizer)
 
 	return writer
 }
 
-func (logger *Logger) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
+func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
 	scanner := bufio.NewScanner(reader)
 	for scanner.Scan() {
 		printFunc(scanner.Text())
 	}
 	if err := scanner.Err(); err != nil {
-		logger.Errorf("Error while reading from Writer: %s", err)
+		entry.Errorf("Error while reading from Writer: %s", err)
 	}
 	reader.Close()
 }

From 55ea4404280f1ca15a982a9421b9713fdc145be8 Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Thu, 6 Jul 2017 16:01:26 -0700
Subject: [PATCH 044/276] registry/{storage,handlers}: limit content sizes

Under certain circumstances, the use of `StorageDriver.GetContent` can
result in unbounded memory allocations. In particular, this happens when
accessing a layer through the manifests endpoint.

This problem is mitigated by setting a 4MB limit when using it to access
content that may have been accepted from a user. In practice, this means
setting the limit with the use of `BlobProvider.Get` by wrapping
`StorageDriver.GetContent` in a helper that uses `StorageDriver.Reader`
with a `limitReader` that returns an error.

When mitigating this security issue, we also noticed that the size of
manifests uploaded to the registry was unlimited. We apply similar logic
to the request body of payloads that are fully buffered.
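For the fully buffered request bodies, the handler side of this change leans on the standard library's `http.MaxBytesReader`, which fails the read once the cap is exceeded instead of silently truncating as `io.LimitReader` would. A self-contained sketch of that usage follows; the handler, route, and responses are illustrative, not the registry's actual code:

```
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
	"strings"
)

// maxBodySize caps fully buffered payloads, mirroring the 4MB
// maxManifestBodySize constant introduced in this patch.
const maxBodySize = 4 << 20

func putManifest(w http.ResponseWriter, r *http.Request) {
	// MaxBytesReader returns an error once the body exceeds the cap,
	// so an oversized payload can never be fully buffered in memory.
	body := http.MaxBytesReader(w, r.Body, maxBodySize)
	payload, err := ioutil.ReadAll(body)
	if err != nil {
		http.Error(w, "payload too large", http.StatusRequestEntityTooLarge)
		return
	}
	fmt.Fprintf(w, "accepted %d bytes\n", len(payload))
}

func main() {
	req := httptest.NewRequest("PUT", "/v2/foo/manifests/latest", strings.NewReader("{}"))
	rec := httptest.NewRecorder()
	putManifest(rec, req)
	fmt.Print(rec.Body.String()) // accepted 2 bytes
}
```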
Signed-off-by: Stephen J Day --- registry/handlers/blobupload.go | 8 ++-- registry/handlers/helpers.go | 13 ++++-- registry/handlers/manifests.go | 8 ++-- registry/storage/blobstore.go | 4 +- registry/storage/io.go | 71 +++++++++++++++++++++++++++++++++ 5 files changed, 91 insertions(+), 13 deletions(-) create mode 100644 registry/storage/io.go diff --git a/registry/handlers/blobupload.go b/registry/handlers/blobupload.go index 963fe4e7..0b39fff5 100644 --- a/registry/handlers/blobupload.go +++ b/registry/handlers/blobupload.go @@ -179,8 +179,8 @@ func (buh *blobUploadHandler) PatchBlobData(w http.ResponseWriter, r *http.Reque // TODO(dmcgowan): support Content-Range header to seek and write range - if err := copyFullPayload(w, r, buh.Upload, buh, "blob PATCH", &buh.Errors); err != nil { - // copyFullPayload reports the error if necessary + if err := copyFullPayload(w, r, buh.Upload, -1, buh, "blob PATCH"); err != nil { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err.Error())) return } @@ -218,8 +218,8 @@ func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *ht return } - if err := copyFullPayload(w, r, buh.Upload, buh, "blob PUT", &buh.Errors); err != nil { - // copyFullPayload reports the error if necessary + if err := copyFullPayload(w, r, buh.Upload, -1, buh, "blob PUT"); err != nil { + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err.Error())) return } diff --git a/registry/handlers/helpers.go b/registry/handlers/helpers.go index dac4f7a8..dc3091a9 100644 --- a/registry/handlers/helpers.go +++ b/registry/handlers/helpers.go @@ -6,7 +6,6 @@ import ( "net/http" ctxu "github.com/docker/distribution/context" - "github.com/docker/distribution/registry/api/errcode" ) // closeResources closes all the provided resources after running the target @@ -23,7 +22,9 @@ func closeResources(handler http.Handler, closers ...io.Closer) http.Handler { // copyFullPayload copies the payload of an HTTP request to destWriter. If it // receives less content than expected, and the client disconnected during the // upload, it avoids sending a 400 error to keep the logs cleaner. -func copyFullPayload(responseWriter http.ResponseWriter, r *http.Request, destWriter io.Writer, context ctxu.Context, action string, errSlice *errcode.Errors) error { +// +// The copy will be limited to `limit` bytes, if limit is greater than zero. +func copyFullPayload(responseWriter http.ResponseWriter, r *http.Request, destWriter io.Writer, limit int64, context ctxu.Context, action string) error { // Get a channel that tells us if the client disconnects var clientClosed <-chan bool if notifier, ok := responseWriter.(http.CloseNotifier); ok { @@ -32,8 +33,13 @@ func copyFullPayload(responseWriter http.ResponseWriter, r *http.Request, destWr ctxu.GetLogger(context).Warnf("the ResponseWriter does not implement CloseNotifier (type: %T)", responseWriter) } + var body = r.Body + if limit > 0 { + body = http.MaxBytesReader(responseWriter, body, limit) + } + // Read in the data, if any. - copied, err := io.Copy(destWriter, r.Body) + copied, err := io.Copy(destWriter, body) if clientClosed != nil && (err != nil || (r.ContentLength > 0 && copied < r.ContentLength)) { // Didn't receive as much content as expected. Did the client // disconnect during the request? 
If so, avoid returning a 400
@@ -58,7 +64,6 @@ func copyFullPayload(responseWriter http.ResponseWriter, r *http.Request, destWr
 
 	if err != nil {
 		ctxu.GetLogger(context).Errorf("unknown error reading request payload: %v", err)
-		*errSlice = append(*errSlice, errcode.ErrorCodeUnknown.WithDetail(err))
 		return err
 	}
 
diff --git a/registry/handlers/manifests.go b/registry/handlers/manifests.go
index f406903f..bbbc10fa 100644
--- a/registry/handlers/manifests.go
+++ b/registry/handlers/manifests.go
@@ -22,8 +22,9 @@ import (
 // These constants determine which architecture and OS to choose from a
 // manifest list when downconverting it to a schema1 manifest.
 const (
-	defaultArch = "amd64"
-	defaultOS   = "linux"
+	defaultArch         = "amd64"
+	defaultOS           = "linux"
+	maxManifestBodySize = 4 << 20
 )
 
 // manifestDispatcher takes the request context and builds the
@@ -259,8 +260,9 @@ func (imh *manifestHandler) PutManifest(w http.ResponseWriter, r *http.Request)
 	}
 
 	var jsonBuf bytes.Buffer
-	if err := copyFullPayload(w, r, &jsonBuf, imh, "image manifest PUT", &imh.Errors); err != nil {
+	if err := copyFullPayload(w, r, &jsonBuf, maxManifestBodySize, imh, "image manifest PUT"); err != nil {
 		// copyFullPayload reports the error if necessary
+		imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err.Error()))
 		return
 	}
 
diff --git a/registry/storage/blobstore.go b/registry/storage/blobstore.go
index 9f9071ca..4a16488b 100644
--- a/registry/storage/blobstore.go
+++ b/registry/storage/blobstore.go
@@ -27,7 +27,7 @@ func (bs *blobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error
 		return nil, err
 	}
 
-	p, err := bs.driver.GetContent(ctx, bp)
+	p, err := getContent(ctx, bs.driver, bp)
 	if err != nil {
 		switch err.(type) {
 		case driver.PathNotFoundError:
@@ -37,7 +37,7 @@ func (bs *blobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error
 		return nil, err
 	}
 
-	return p, err
+	return p, nil
 }
 
 func (bs *blobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
diff --git a/registry/storage/io.go b/registry/storage/io.go
new file mode 100644
index 00000000..c1be3b77
--- /dev/null
+++ b/registry/storage/io.go
@@ -0,0 +1,71 @@
+package storage
+
+import (
+	"errors"
+	"io"
+	"io/ioutil"
+
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/registry/storage/driver"
+)
+
+const (
+	maxBlobGetSize = 4 << 20
+)
+
+func getContent(ctx context.Context, driver driver.StorageDriver, p string) ([]byte, error) {
+	r, err := driver.Reader(ctx, p, 0)
+	if err != nil {
+		return nil, err
+	}
+
+	return readAllLimited(r, maxBlobGetSize)
+}
+
+func readAllLimited(r io.Reader, limit int64) ([]byte, error) {
+	r = limitReader(r, limit)
+	return ioutil.ReadAll(r)
+}
+
+// limitReader returns a new reader limited to n bytes. Unlike io.LimitReader,
+// this returns an error when the limit is reached.
+func limitReader(r io.Reader, n int64) io.Reader {
+	return &limitedReader{r: r, n: n}
+}
+
+// limitedReader implements a reader that errors when the limit is reached.
+//
+// Partially cribbed from net/http.MaxBytesReader.
+type limitedReader struct {
+	r   io.Reader // underlying reader
+	n   int64     // max bytes remaining
+	err error     // sticky error
+}
+
+func (l *limitedReader) Read(p []byte) (n int, err error) {
+	if l.err != nil {
+		return 0, l.err
+	}
+	if len(p) == 0 {
+		return 0, nil
+	}
+	// If they asked for a 32KB read but only 5 bytes are
+	// remaining, no need to read 32KB. 6 bytes will answer the
+	// question of whether we hit the limit or go past it.
+	if int64(len(p)) > l.n+1 {
+		p = p[:l.n+1]
+	}
+	n, err = l.r.Read(p)
+
+	if int64(n) <= l.n {
+		l.n -= int64(n)
+		l.err = err
+		return n, err
+	}
+
+	n = int(l.n)
+	l.n = 0
+
+	l.err = errors.New("storage: read exceeds limit")
+	return n, l.err
+}

From a11fe173d5f581344a381bc95d9480b7516ab250 Mon Sep 17 00:00:00 2001
From: fate-grand-order
Date: Tue, 18 Jul 2017 16:01:54 +0800
Subject: [PATCH 045/276] fix misspelling "algorithm" for cache/redis/redis.go

Signed-off-by: Helen Chen
---
 registry/storage/cache/redis/redis.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/registry/storage/cache/redis/redis.go b/registry/storage/cache/redis/redis.go
index 5a5819ac..98f0f759 100644
--- a/registry/storage/cache/redis/redis.go
+++ b/registry/storage/cache/redis/redis.go
@@ -224,7 +224,7 @@ func (rsrbds *repositoryScopedRedisBlobDescriptorService) SetDescriptor(ctx cont
 
 	if dgst != desc.Digest {
 		if dgst.Algorithm() == desc.Digest.Algorithm() {
-			return fmt.Errorf("redis cache: digest for descriptors differ but algorthim does not: %q != %q", dgst, desc.Digest)
+			return fmt.Errorf("redis cache: digest for descriptors differ but algorithm does not: %q != %q", dgst, desc.Digest)
 		}
 	}

From 5e5156afa32238b522926510e90256c027f9294f Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Tue, 18 Jul 2017 14:48:55 -0700
Subject: [PATCH 046/276] api: url typo in specification

Signed-off-by: Stephen J Day
---
 docs/spec/api.md               | 2 +-
 docs/spec/api.md.tmpl          | 2 +-
 registry/api/v2/routes_test.go | 6 +++---
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/docs/spec/api.md b/docs/spec/api.md
index efa55cfc..81d44e1b 100644
--- a/docs/spec/api.md
+++ b/docs/spec/api.md
@@ -676,7 +676,7 @@ the upload will not be considered complete.
 The format for the final chunk will be as follows:
 
 ```
-PUT /v2/<name>/blob/uploads/<uuid>?digest=<digest>
+PUT /v2/<name>/blobs/uploads/<uuid>?digest=<digest>
 Content-Length: <size of chunk>
 Content-Range: <start of range>-<end of range>
 Content-Type: application/octet-stream
diff --git a/docs/spec/api.md.tmpl b/docs/spec/api.md.tmpl
index cf7f9297..f96d3346 100644
--- a/docs/spec/api.md.tmpl
+++ b/docs/spec/api.md.tmpl
@@ -676,7 +676,7 @@ the upload will not be considered complete.
 The format for the final chunk will be as follows:
 
 ```
-PUT /v2/<name>/blob/uploads/<uuid>?digest=<digest>
+PUT /v2/<name>/blobs/uploads/<uuid>?digest=<digest>
 Content-Length: <size of chunk>
 Content-Range: <start of range>-<end of range>
 Content-Type: application/octet-stream
diff --git a/registry/api/v2/routes_test.go b/registry/api/v2/routes_test.go
index f632d981..6c77e281 100644
--- a/registry/api/v2/routes_test.go
+++ b/registry/api/v2/routes_test.go
@@ -180,8 +180,8 @@ func TestRouterWithPathTraversals(t *testing.T) {
 	testCases := []routeTestCase{
 		{
 			RouteName:   RouteNameBlobUploadChunk,
-			RequestURI:  "/v2/foo/../../blob/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286",
-			ExpectedURI: "/blob/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286",
+			RequestURI:  "/v2/foo/../../blobs/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286",
+			ExpectedURI: "/blobs/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286",
 			StatusCode:  http.StatusNotFound,
 		},
 		{
@@ -202,7 +202,7 @@ func TestRouterWithBadCharacters(t *testing.T) {
 	testCases := []routeTestCase{
 		{
 			RouteName:  RouteNameBlobUploadChunk,
-			RequestURI: "/v2/foo/blob/uploads/不95306FA-FAD3-4E36-8D41-CF1C93EF8286",
+			RequestURI: "/v2/foo/blobs/uploads/不95306FA-FAD3-4E36-8D41-CF1C93EF8286",
 			StatusCode: http.StatusNotFound,
 		},
 		{

From 20f225005a4f5fe497fd6456f218216387a41069 Mon Sep 17 00:00:00 2001
From: YuJie <390282283@qq.com>
Date: Sat, 8 Jul 2017 06:59:10 +0800
Subject: [PATCH 047/276] Fix the sentence

Signed-off-by: YuJie <390282283@qq.com>
---
 blobs.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/blobs.go b/blobs.go
index 01d30902..0b13f50f 100644
--- a/blobs.go
+++ b/blobs.go
@@ -66,7 +66,7 @@ type Descriptor struct {
 	Size int64 `json:"size,omitempty"`
 
 	// Digest uniquely identifies the content. A byte stream can be verified
-	// against against this digest.
+	// against this digest.
 	Digest digest.Digest `json:"digest,omitempty"`
 
 	// URLs contains the source URLs of this content.

From 1d957167928dda398d9ddd6976d6af2ee81079f1 Mon Sep 17 00:00:00 2001
From: Misty Stanley-Jones
Date: Mon, 24 Jul 2017 14:23:50 -0700
Subject: [PATCH 048/276] Put back v2-registry-auth.png

Signed-off-by: Misty Stanley-Jones
---
 docs/spec/auth/token.md               |   2 +-
 docs/spec/images/v2-registry-auth.png | Bin 0 -> 11063 bytes
 2 files changed, 1 insertion(+), 1 deletion(-)
 create mode 100644 docs/spec/images/v2-registry-auth.png

diff --git a/docs/spec/auth/token.md b/docs/spec/auth/token.md
index f8391bd5..f5441980 100644
--- a/docs/spec/auth/token.md
+++ b/docs/spec/auth/token.md
@@ -8,7 +8,7 @@ keywords: ["registry, on-prem, images, tags, repository, distribution, Bearer au
 
 This document outlines the v2 Docker registry authentication scheme:
 
-![v2 registry auth](../../images/v2-registry-auth.png)
+![v2 registry auth](../images/v2-registry-auth.png)
 
 1. Attempt to begin a push/pull operation with the registry.
 2. If the registry requires authorization it will return a `401 Unauthorized`
diff --git a/docs/spec/images/v2-registry-auth.png b/docs/spec/images/v2-registry-auth.png
new file mode 100644
index 0000000000000000000000000000000000000000..3b05d04b5bd6c9830266a5c53c9362251551a0e5
GIT binary patch
literal 11063
z#y_|ZO$ zT#FJ2-?#H`UD8dn3biHY3OXHUPTuu(%^yy&m}@024FN)tX*QxIrn z>kP68_rm{qo|R;m;b$d1RV`qIg;QjP?ug)Hd<>S?zTd?TS@o7hv+!!YvH}R#wxIX( zX}?p<(@B#~Tu^Ory%5R=HasP{&^$xcF1`>*arUHBlv(nb!1;n6VVIPY3odG*f06eR z>>H-Q!5DCSxfCh>*6OoIaDGJS$i1+CR8^kloQ|A9QswzPkKzjWH@7o8}}!t*dzUb7f1Z1Z3C zzeHl;>pETd71O&@O6sN|@5mdL$aM~}b|iW?RSSC%Vs96&V6;pl-J8b!xuf<{1MHPp z%CIQ>lSdu`o?EskHRyxyY`OHUD^OQCQHRdvpB8B&0Fod%3fw!-xkX3@v-?k#puPP) zgR14)!!wt{B4pq?fh+EM?NY56_Jd$TZUSzClC}_x*{Vb3F$Sc@rN$Mp!6wESS2+{L z=W7cDS&uK@%!WFuZh?(o3=uulXx5!2iPj=GI*n z*UlV2vQh)~X$D>07)a|a@x~4NS~cR4P|bw>NJnzb&h1d;2G2{vFD!Sg0$synJfCDt z7)9l+*@`rvX>GT7&)7RZ#z27)iKGSBlm>R(NGH0gwX1cv*4%?1PZ&x(Yj368?|6fI zDfE2Gw0DLkTQ-<3Ufj0!hsT#~)~!{^vX!Gj>4Mmw2VysFXc*%Pv7W|kj@fPI)Z@8+ z1%bgmj|FQ&Xszoz ze(^n{KK!VZtu4RJK3xVOcMfd!H<@Y9(q2?UnWbp=^CTau;hDxC-Q-Px+fs{@GlK>l z(m_QVo%|!8NuOVxh8NoPy_}OyZTk~dLo6IRJLZn>TcxrGB&JcX zqwojv5;gddD&5>?UR@E03RuI>N8y$uaT({0T!G3uM^%w$$_Tl3rN5$D_;U)Ab)amU zVrtwdbui@jW(0ZPH+=t-_U~p_E4{7XO>Mi!9|W(yUk8Y6n0+zpBiK3z41ep7lN8^KB@(QX$utd8 zm6LC-l&Jnrigh4vvM=y*&h%{0vNwzgug#~O(1(tmB|SWu+Shl1<|Z6L%k@*S!DHuZ z3DU{y_T|u-ou3!Ovqj=*Y3gzW=&Z}On5Duy_*lA4rc!?im%C+|y$UZ2Dh^1u{~jaQ z?)0#HAVT`V31C`=m`urlzO5kucjtZ%6Wcq0UWPe-m!P3zwlw@WBEWA4zot~Xm9K4^ z&lRVbr9Cx7aMVI}$FwY^U5BI(JDdZ!glz}roXCq)g*^zGNN?A+{Ycu^X+Q55Z#LJ< z4}mq}H}+zmd3MZHjXGV7BNO%a()`XC#pMZ0M7gEwraiZMX@=d=_~rQf=X`l)-Tx5Z2?H?-e`PV=L& zttva*j}9`L6gTt_bY7W`++H$&pws?--#<`D-G8NIhBoPe@q9+>oz#HXvexBAY$=|E zvDrjvr{Nq{32cDbhi?a^=aMlf)Rs^xvT#URWT956iDi&LR5ZQ_$lYxsGF`%3or$Tq zP})!Di$IW@A~0w>6wP(FMU-u^h5xa503 zv3zd3E#ppqSGWglmtvnSNi15I^bo%d@jgjgl|HnaF)nx9?L1duirl(N3%IE4*-N)c zLoK^Z4fvG|H&zCjgSI#lwODf*Vz-nngJ`gHmVmpjVt~7Seb`=#FWCfjTR>E^RTo-; z**aqzce@ic!>~w`YdHGvhC`FLV z`G5?vz&W(~e-f2>4XOmY0hYxcG+R|n?a_$hBlxcMaW*QBi9l3dX`eK9a$mB40ZCfh z4ds^seyeYhDN!rbgPByWQIJEtMqPgWN~B%DX{&pu$B8;I=BlkY_maoy9g1&)p?QM` z%q$rVsA%tb6;G9iNqsigDhufzg8H!qrF_yZnlLr>+PV&FuEj6X`W~D!!C)8kZ#><7 z(R$ZHA}NZQPz{&5m^~gq=cvH#>wkG+3YPTjkYw`nj*q4SIks%$E3XaoJScVdrdv}dY{D(K4?Ni1ZiK?YSrQ` ziCf{ZXr?V!@H8sUC44UbXKJX42dL*T$bkMPL~eUOUMO%>cXNhN3$QAFbZ_nW`UUdb z$2P(O^tI6Jx>_w)W@8sA$ST3RJa2Bde{64{=$Ty8$U@NZ%!8YZcKEq%(@V>92I;*h zVL!!RO*R>|M4`upTuu>KULTZ{ELWdTnp}KOK`Zt@P)4N7c|oN_pFp(zCvwcs*(Nus z-ApmcbNIlrPTD)yS)h$R%D}s7YJ`x+N5y@#U@OAK_uaJxBx6X<8k#iV^K&8MMf~sC zxrxSwZ{u9qgeL|t62h1!E>6k~b2n2;E&O!asJw{oK7$3(S4SUr$}YmVqmZ77yXTMT z(Mgp*iKphdjI(+`y4Z+*%WIs6ayYV`{1*Ij6d!8atw4#bw^z`f<5I-!556wvZ#6Gh z8`Y{A%uy%gO;5sCguJJz`3kTYmatR;5znTh$Yh@ zZV#}4zCkY$`CA>3F|C?hKMlHb<)xc;se$Q*%@N&v*(f|Bx0YOY)cv^FwUMsY5dZ`_hg})%RfnK~5 z`bD;J0o*V;>D3o&&l;uzrCiW4PB_x(a&s}Q_@T*VR@l&dtVUz6@hOy^Fv@S8rtk^kSuGa9k^9+i##gt_XlV7yblg&sb z`3q7hcd^PW*#zTwff!v)A@akA%mM9-g~Pf{iiTA;l6e*v3EWBfX3Fp>-H8|-Ys6a`ea%X`$gVO9o%Y9U+Vdhbl07$|qV^PsWnQ)F%k&&IjHS&enwj@)s+g z5Sypk7_AUTkQ zv^bg?jhTUpL&tW=de+suW(RSkjVrYd@7&4nllafdc)1288cjHX@@ZL{hnm^>N5%~^ zV3DJPqB{(vZo!Z0W(e$z+Rd9f<7~r?x~Y7DV>{o(somiB^|rN_h0|+yQ&2)E@w2VR zML4?+HJ_XTm|LP6s^GDX_H_H0zdepglEbGbW>%)szB1Af6kym)vP=3;5K>I}iS^KY z2QFp}iSmq2Uez4ZBPwG0{Ed2XaZq(PVOOoZL*)_XP4c-KnDEBw$JtG!>aKntBY3C- zQLwikKCSmsY-|?wPO|y>N=|pNN`RB?O0|6FJ0$glN#2nu`afwDW@K_b1U}OwHu7}F z_ghyEz5f6MnAEIdb8rtW_0{~tR!efk;ZuC4ffc|zLSb_0DF1IVy!`x@)B#muk5kIQ zBCcT;S&MrYH)Ivzk3WLLIV}~>b6*Jei*@E*df@ApOg0hjWO>19ZS2Sby%{vE@bH}= z6{YH28SmKVyMdDfwfJ?ARJI$2qbcqhClM**go^?#^d z!Ia&tuge7taMzdIT_%|qe(uvyG!OL=MdD^+MwfFA5rRMYq=uPd^z(ch zj{4}-TM(aXxEf5!C#pyhG2UvJ(TP1QWv!9oj5*n$q~G>`_%#jd(n|-SgBrt9<+{-7-MsF?S-4C6y9m;-i zi~O_H?}jfw7af&7pCJD65Cs{q$anRH@vboFFVpsJqnZ?-Y2v7tTbu|5a#){v5#>(F z0UaezoYzo+Mvze+~UClE{_ptn0e{(9Ru6Oe}r6GRDAcA ziHgB1J^I1J^V}(s7=+dmOP%CJtR 
z`H)~)dFdr*>XH`BpilI^;^s0e>-35Mq1dHv@^;en>FJLkZy%$jZ+zkGHgLDmsUeU+ zRVXOth>SRSU&3?Yg^>1uMTKD<&_YrfLSwUAj5(jy>y|W=xPZ$i4?($jpE3FG^Ot-9 zGYyWOXXV*~=S&?OO&;6>?bLMu9Lijf4%)r@U;Gho>T{;B^}Rs?T|i$T=Luf&fe7og zRz|O<*O=1SPO8DYBIV}*J=&U0ic7Nl+{MEn4XrRtn(Rv-4OJ9d@R@qRcPE;z$|Guh zz$^VI;swM)*AoeF_M~CfdFJ}F--YYNhxFgEhy8WC5@$NY1be2ni{aCphn%Md5oEE7 zW-ETEnJ(N~#eo))H_RA1OO?jdpv<|62Ry!D+b^VcGd#(8>)n_mM4n3HNTKiE_97d z}&_S~gt zg(&RqgVS4IlB$}&K%`C8|N8JrQup2V;)(- zXTq~33|5R2ZzT*gkA( zS81*>6+G30tz7=r_f56dq$3xg21V7t&+9CiZql++HgusQPLtMDhDKVgWV{uR!SpVq zx^o<#e_7bKRAUn}??c0dMR#4*qXn&YzzBD8tc4$@{*LO& z+(L@bUo({?dfeLOt5kgFd&D_Q?}5xymtSXb$v;mn{X{I4-kxcE{iNAIthdG>9Bv9s z_Fe+mc)U{WMewZLJC|o^t}%c(K|_$+x)`yuQ|)ELA2b*!o@P(7SblI`Z2{R`d0>v( zTD?auyhwo1E*$Qp#~bVm6L`dBgOj*v-wbEhFK8Ky+u^{hBm$Eb#oVAIB#vw3L(tKn z5j+1coO8D@#lf0LF8`?@`6_l-0Bf@nBzV39h{*r#hZ1C+R=`|bOFJ{@=*29r>tLS4 zC@P)}IH|C}KL4Dw&Fh$mzCcL+^;-yjg1$&j|?5(5lF{|KuV}h;QS8l zBf5o+CGQay){Oeg-Q@36`evEIk5=y8AZosd<`SB&sqNa}ITFW|vw>Ie_*_pEKJ5P9 zTY1pPQQ_j@JFnrJmJ5G+u=_O-IImJG(B6#w-)X@0c&ka{5H59d!XfwMcfr1(4zyc(Z8Idcp(d^%(?=16X6r2hpD56D_iLnSnk z-L$j`r|>SU6keFcV6dP46XjeSYVqkz7)gN8DjeofFXxaEHGjkYJ%X!*iwV3UpH=c!~7x3dwb{Gu?gve2Ye(h%&Ejn-98DO8U1*`%9Nb z5NM4xh&Z0~X&WAEU9Jq3#-Wa6+L8=sg3Ba^3Q$HXpm>{;oIng zphwH6SBr+Nvp=G^xL+rd?XLj7i>o0vrbTc-AU?N9wd;c*-X)%dvOCxq26G(0o+Bs7 z`pZ1|QSZ`On`?|Ar>@cF?5_A8{b-|J;u0&W37U8Y?gC8=NMCs`m zCA-#X^3W)2WVH#VQ;!0#kRZjC{i(g!^s48b?eecGw7q!wbqk zjMKM}%5a~id5`aUxCKp#b%*nvlemniw(gtdP3fCuV*fk5|1m_CwG0U^w` z=zptvgLrxV+_1gO{XkXosI>V^tl{ca>foEDy|9JxGLrQb->>bQF~=a;VGvT=kJR6Wl zZCh`oMzR6qpddnS0EH0*n;3vg{bwAk`0qHj4g23Qg((3yI2Z`Pj`(iJd)lAz&a@wn zOC8j9>z!7y2?|`H+qm4B^*SkSpuSw_?A5amT-`U_j`%pIookaWaTh$2bWNo>vZ?}g zC&nCt8j=zmo#g7W-ik^eGC>f3mu6-3zqAP*ByX?@w?udCWDytCO!@9P8vGQ>s{J`G zu_r|TN?Oy!MciY6#PS$@z4oZ;Ok3(*!X#4Nx4L3p9A?2jdG+|&Mz+8XK8a+DG@FEN z8e-`AFsVxnE_Eg)0eA4%J>Szw!^NY7+wjO@`VGOuxtusbi^_oE15Tsi^P{rFI_`@Q$Xfuf?fIeSpZI@X1(KWY@WR0GG zb!h_6=2h0?Q(@m*2^W}Uc5f{s_2)M1c)Z{zF{QnAlf*f zSwRkI9N%pUHj_~Ly`zT284s1^qaImqXeG35f1iAsQoWie;l)ypS#&C$ZY>&_c@us3 zu6MYvS!>3x%77{RT6lG-Z6wjRbj0B%wd1LxMtIs;;pZ){qBE=3)(Q6?C7!bxQS#cn zJ4v1oqKsoqAM7O{-ryrkZ9*4&N(8ox-_aR1LPIVdLLKP~`@L?Ix*QKqn%`|; zxKuRN9a&Eg1UMNMd!=cTNuiTkPh)QD$ped&Ku25NttW{V$n_Y^0yD&xiUUdvwX`m_ zE)C7@XlNw5s(18xB+&X=Mmx+KX3uC`08C#s76Ui6+0cF6qynP0m6CfkQwNuqq0%@H zd-^d<+ZG@Xy)(HTK$^|Hg+Q9mpLE*xsyX0#G6{<{PjJ(0UOzRiC+*1pmXf?Gb4*#2&PaLd0+^ zvw^o-kef6o|HW^_nB;?l2eJP{+@_v9$3oQ7Iy}8&8CI%1Dd*gN9hOryDU1@fN`vLAb2?882B$@%IA5t++XBZ>mXv73kYG9~j=0vBFQZFdW0 zIa33nbEqs(+wVpjZUlA43WZEf=LS#qJC8aM?1m&Na@eYYR+fm~TeSXZY+C zYe7-^u~i#C1G?}3;439pG`rR+r@ZEs#=kB+VKiFxK?ReZEsS}o4<=Uu4SK_5-fWb3 z(x)ttv_G1vcQiA}cauJz-S9Q*JeUnU;$X4D^U1B;nx`)vrjDnih^S#v}*z+oUbF%HGs8VHkJzucmdr7)SsfDP` z`Nd<*u<-Qa^v-+{kP$}8F-^c<82;%D{x$^Qf4c+>QlQTNKh%p${Vxsw%hZ_h|H|4j z_6$`L7{tO9v5DoN{cp3x(9>Vgk%}=F_?EYI>cc$gQqL2vrzLDhKTG~db)TL782t_8 zi;kU=nObczk6EaoJy~J1IG?KCGswD$POsN;nDIW4Z0fUdv|%j=P$1j3!J|cvvDsO~ z`R0&*>rkB6!^8TXS$sD~Vv+SLHqHx2c_Vj1XLqXlYEUeu$>51dHmPU*`xv$v*&Xz5 z%57y7SYFU#sxHxTY#w~OOZ`J3;3UXedU#PBw%`yvv1;y1cBa{c`Z^fBC&2TmrzFDV z=gGlN2j8Cf-4uqs92oCxhDn}g5@qwFAE}bxBftjbPCFHqa4Uj*-xJ8gKYzd`B%maq zkbZ_3VU9mIK5}pDjcza(>#UQk5^}#47rG4`8i;RK@-!?jP<7Fe{mg&Wx?)|hCc=H|+E}dNt50hE3wzN;I z{-)SaBJDX{8{>F!J8sx_5h{>m6ZXw(pVa>=K32tX?i@CSG~*Ezz5R+wC-nY6AOZlu^#K##!7_7@Vn}1(nZdVY)8rDJ~+`_P*|z$ITrWZN9K7O=#n%8o1$Iz@r3 z7flT;!vfvzQmjR(Sle>n6vD3SW9&Bq9D?_L6^^rF6K}m+V;eP?QNabDNt0HD{ATf6 zgw_iVIYMTy@Kk4tp+Bhwn>{?)3MI(8)EIWQh%I`+hEub){Jbru;G*fwN7n<2V{DGZ z?G$#ClEa+W2I_t-;Scc%*YSA$@u~eopu6Pcc_NLH%7VUb(hTJ2i!u772epOS3E{E^gYYOCw0o7%uj7o55G`>!U4z5F?J 
zTf^GCwtp2aLZ6h3$`EYJHM3kGb|Wo72nU-bxs%#6DwgB&8-GOE?sd)U8~EZ60scTT zirx0d11+t0Zw`2)^9WV|(YLaumUHtDvD@b7=hOKA!6n5Z1rUZ1?~{!u)~}TOd7X0m zq~S-B;vDS~slk@1{%sU*VAdPgzD3vZn>g0DYqCOjh^BUIFXOo6@>8KeOUq?dg96V! zd>wDi;c@@LoSZxk*eY)VCwY}Zsjkdt@XB%f7a9v+`7g=EYmTDLVekm7A0@)wI+rjR z@tkEw6U^WqnH637{tBxwK7 z$n}KD>72-a+1)56V}y8)JFSbmMPKLKTj73My6)l)B*dnsOC7#7S zw<VNUf;*GX#dZx%{wvWNfefB^T3}_ NRYeU*nF2KAe*o#eDF^@n literal 0 HcmV?d00001 From f7fb45f59a3209593e3c81d075bab8285fe5172f Mon Sep 17 00:00:00 2001 From: Riyaz Faizullabhoy Date: Mon, 24 Jul 2017 14:51:10 -0700 Subject: [PATCH 049/276] add CONTRIBUTING info for security issues Signed-off-by: Riyaz Faizullabhoy --- CONTRIBUTING.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7cc7aedf..1f1441be 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -21,6 +21,14 @@ Then please do not open an issue here yet - you should first try one of the foll - irc: #docker-distribution on freenode - mailing-list: or https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution +### Reporting security issues + +The Docker maintainers take security seriously. If you discover a security +issue, please bring it to their attention right away! + +Please **DO NOT** file a public issue, instead send your report privately to +[security@docker.com](mailto:security@docker.com). + ## Reporting an issue properly By following these simple rules you will get better and faster feedback on your issue. From 3d7803ec8c049a17d7e64a2849cd706f1dd20023 Mon Sep 17 00:00:00 2001 From: Jon Johnson Date: Fri, 28 Jul 2017 10:13:32 -0700 Subject: [PATCH 050/276] Fix typo in api spec Signed-off-by: Jon Johnson --- docs/spec/api.md | 2 +- docs/spec/api.md.tmpl | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/spec/api.md b/docs/spec/api.md index 81d44e1b..29dc51e5 100644 --- a/docs/spec/api.md +++ b/docs/spec/api.md @@ -685,7 +685,7 @@ Content-Type: application/octet-stream ``` Optionally, if all chunks have already been uploaded, a `PUT` request with a -`digest` parameter and zero-length body may be sent to complete and validated +`digest` parameter and zero-length body may be sent to complete and validate the upload. Multiple "digest" parameters may be provided with different digests. The server may verify none or all of them but _must_ notify the client if the content is rejected. diff --git a/docs/spec/api.md.tmpl b/docs/spec/api.md.tmpl index f96d3346..4338b057 100644 --- a/docs/spec/api.md.tmpl +++ b/docs/spec/api.md.tmpl @@ -685,7 +685,7 @@ Content-Type: application/octet-stream ``` Optionally, if all chunks have already been uploaded, a `PUT` request with a -`digest` parameter and zero-length body may be sent to complete and validated +`digest` parameter and zero-length body may be sent to complete and validate the upload. Multiple "digest" parameters may be provided with different digests. The server may verify none or all of them but _must_ notify the client if the content is rejected. From 9c88801a12a97de49abf26e9604975eececa654c Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Fri, 11 Aug 2017 15:31:16 -0700 Subject: [PATCH 051/276] context: remove definition of Context Back in the before time, the best practices surrounding usage of Context weren't quite worked out. We defined our own type to make usage easier. As this packaged was used elsewhere, it make it more and more challenging to integrate with the forked `Context` type. 
Now that it is available in the standard library, we can just use that one directly. To make usage more consistent, we now use `dcontext` when referring to the distribution context package. Signed-off-by: Stephen J Day --- blobs.go | 2 +- context/context.go | 18 +-- context/http.go | 23 ++-- context/logger.go | 19 +-- context/trace.go | 5 +- context/trace_test.go | 8 +- context/util.go | 5 +- context/version.go | 16 ++- contrib/token-server/main.go | 61 +++++----- contrib/token-server/token.go | 5 +- manifest/schema1/config_builder.go | 2 +- manifest/schema1/config_builder_test.go | 7 +- manifest/schema1/reference_builder.go | 4 +- manifest/schema2/builder.go | 3 +- manifest/schema2/builder_test.go | 2 +- manifests.go | 2 +- notifications/listener.go | 29 ++--- registry.go | 3 +- registry/auth/auth.go | 3 +- registry/auth/htpasswd/access.go | 7 +- registry/auth/silly/access.go | 11 +- registry/auth/token/accesscontroller.go | 5 +- registry/client/blob_writer.go | 2 +- registry/client/repository.go | 2 +- registry/handlers/api_test.go | 2 +- registry/handlers/app.go | 113 +++++++++--------- registry/handlers/blobupload.go | 26 ++-- registry/handlers/context.go | 16 +-- registry/handlers/helpers.go | 11 +- registry/handlers/manifests.go | 18 +-- registry/middleware/registry/middleware.go | 2 +- registry/middleware/repository/middleware.go | 2 +- registry/proxy/proxyblobstore.go | 9 +- registry/proxy/proxyblobstore_test.go | 2 +- registry/proxy/proxymanifeststore.go | 5 +- registry/proxy/proxymanifeststore_test.go | 2 +- registry/proxy/proxyregistry.go | 5 +- registry/proxy/proxytagservice.go | 3 +- registry/proxy/proxytagservice_test.go | 2 +- registry/proxy/scheduler/scheduler.go | 15 +-- registry/registry.go | 19 +-- registry/root.go | 4 +- registry/storage/blob_test.go | 2 +- registry/storage/blobserver.go | 2 +- registry/storage/blobstore.go | 7 +- registry/storage/blobwriter.go | 15 +-- registry/storage/blobwriter_resumable.go | 2 +- registry/storage/cache/cachecheck/suite.go | 2 +- .../cache/cachedblobdescriptorstore.go | 11 +- registry/storage/cache/memory/memory.go | 2 +- registry/storage/cache/redis/redis.go | 2 +- registry/storage/catalog.go | 2 +- registry/storage/catalog_test.go | 2 +- registry/storage/driver/azure/azure.go | 2 +- registry/storage/driver/base/base.go | 21 ++-- registry/storage/driver/base/regulator.go | 2 +- registry/storage/driver/filesystem/driver.go | 2 +- registry/storage/driver/gcs/gcs.go | 33 +++-- registry/storage/driver/gcs/gcs_test.go | 18 +-- registry/storage/driver/inmemory/driver.go | 2 +- .../middleware/cloudfront/middleware.go | 5 +- .../driver/middleware/redirect/middleware.go | 2 +- registry/storage/driver/oss/oss.go | 3 +- registry/storage/driver/oss/oss_test.go | 8 +- registry/storage/driver/s3-aws/s3.go | 2 +- registry/storage/driver/s3-goamz/s3.go | 7 +- registry/storage/driver/storagedriver.go | 3 +- registry/storage/driver/swift/swift.go | 2 +- .../storage/driver/testdriver/testdriver.go | 3 +- .../storage/driver/testsuites/testsuites.go | 5 +- registry/storage/filereader.go | 2 +- registry/storage/garbagecollect.go | 2 +- registry/storage/io.go | 2 +- registry/storage/linkedblobstore.go | 11 +- registry/storage/linkedblobstore_test.go | 5 +- registry/storage/manifestlisthandler.go | 11 +- registry/storage/manifeststore.go | 13 +- registry/storage/manifeststore_test.go | 2 +- registry/storage/purgeuploads.go | 10 +- registry/storage/purgeuploads_test.go | 2 +- registry/storage/registry.go | 2 +- registry/storage/schema2manifesthandler.go | 9 
+- registry/storage/signedmanifesthandler.go | 9 +- registry/storage/tagstore.go | 2 +- registry/storage/tagstore_test.go | 2 +- registry/storage/util.go | 3 +- registry/storage/vacuum.go | 7 +- registry/storage/walk.go | 2 +- registry/storage/walk_test.go | 2 +- tags.go | 2 +- 90 files changed, 396 insertions(+), 373 deletions(-) diff --git a/blobs.go b/blobs.go index 01d30902..145b0785 100644 --- a/blobs.go +++ b/blobs.go @@ -1,13 +1,13 @@ package distribution import ( + "context" "errors" "fmt" "io" "net/http" "time" - "github.com/docker/distribution/context" "github.com/docker/distribution/reference" "github.com/opencontainers/go-digest" ) diff --git a/context/context.go b/context/context.go index 23cbf5b5..ab686546 100644 --- a/context/context.go +++ b/context/context.go @@ -1,21 +1,16 @@ package context import ( + "context" "sync" "github.com/docker/distribution/uuid" - "golang.org/x/net/context" ) -// Context is a copy of Context from the golang.org/x/net/context package. -type Context interface { - context.Context -} - // instanceContext is a context that provides only an instance id. It is // provided as the main background context. type instanceContext struct { - Context + context.Context id string // id of context, logged as "instance.id" once sync.Once // once protect generation of the id } @@ -42,17 +37,10 @@ var background = &instanceContext{ // Background returns a non-nil, empty Context. The background context // provides a single key, "instance.id" that is globally unique to the // process. -func Background() Context { +func Background() context.Context { return background } -// WithValue returns a copy of parent in which the value associated with key is -// val. Use context Values only for request-scoped data that transits processes -// and APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key, val interface{}) Context { - return context.WithValue(parent, key, val) -} - // stringMapContext is a simple context implementation that checks a map for a // key, falling back to a parent if not present. type stringMapContext struct { diff --git a/context/http.go b/context/http.go index 7d65c852..b7447800 100644 --- a/context/http.go +++ b/context/http.go @@ -1,6 +1,7 @@ package context import ( + "context" "errors" "net" "net/http" @@ -68,7 +69,7 @@ func RemoteIP(r *http.Request) string { // is available at "http.request". Other common attributes are available under // the prefix "http.request.". If a request is already present on the context, // this method will panic. -func WithRequest(ctx Context, r *http.Request) Context { +func WithRequest(ctx context.Context, r *http.Request) context.Context { if ctx.Value("http.request") != nil { // NOTE(stevvooe): This needs to be considered a programming error. It // is unlikely that we'd want to have more than one request in @@ -87,7 +88,7 @@ func WithRequest(ctx Context, r *http.Request) Context { // GetRequest returns the http request in the given context. Returns // ErrNoRequestContext if the context does not have an http request associated // with it. -func GetRequest(ctx Context) (*http.Request, error) { +func GetRequest(ctx context.Context) (*http.Request, error) { if r, ok := ctx.Value("http.request").(*http.Request); r != nil && ok { return r, nil } @@ -96,13 +97,13 @@ func GetRequest(ctx Context) (*http.Request, error) { // GetRequestID attempts to resolve the current request id, if possible. An // error is return if it is not available on the context. 
-func GetRequestID(ctx Context) string { +func GetRequestID(ctx context.Context) string { return GetStringValue(ctx, "http.request.id") } // WithResponseWriter returns a new context and response writer that makes // interesting response statistics available within the context. -func WithResponseWriter(ctx Context, w http.ResponseWriter) (Context, http.ResponseWriter) { +func WithResponseWriter(ctx context.Context, w http.ResponseWriter) (context.Context, http.ResponseWriter) { if closeNotifier, ok := w.(http.CloseNotifier); ok { irwCN := &instrumentedResponseWriterCN{ instrumentedResponseWriter: instrumentedResponseWriter{ @@ -125,7 +126,7 @@ func WithResponseWriter(ctx Context, w http.ResponseWriter) (Context, http.Respo // GetResponseWriter returns the http.ResponseWriter from the provided // context. If not present, ErrNoResponseWriterContext is returned. The // returned instance provides instrumentation in the context. -func GetResponseWriter(ctx Context) (http.ResponseWriter, error) { +func GetResponseWriter(ctx context.Context) (http.ResponseWriter, error) { v := ctx.Value("http.response") rw, ok := v.(http.ResponseWriter) @@ -145,7 +146,7 @@ var getVarsFromRequest = mux.Vars // example, if looking for the variable "name", it can be accessed as // "vars.name". Implementations that are accessing values need not know that // the underlying context is implemented with gorilla/mux vars. -func WithVars(ctx Context, r *http.Request) Context { +func WithVars(ctx context.Context, r *http.Request) context.Context { return &muxVarsContext{ Context: ctx, vars: getVarsFromRequest(r), @@ -155,7 +156,7 @@ func WithVars(ctx Context, r *http.Request) Context { // GetRequestLogger returns a logger that contains fields from the request in // the current context. If the request is not available in the context, no // fields will display. Request loggers can safely be pushed onto the context. -func GetRequestLogger(ctx Context) Logger { +func GetRequestLogger(ctx context.Context) Logger { return GetLogger(ctx, "http.request.id", "http.request.method", @@ -171,7 +172,7 @@ func GetRequestLogger(ctx Context) Logger { // Because the values are read at call time, pushing a logger returned from // this function on the context will lead to missing or invalid data. Only // call this at the end of a request, after the response has been written. -func GetResponseLogger(ctx Context) Logger { +func GetResponseLogger(ctx context.Context) Logger { l := getLogrusLogger(ctx, "http.response.written", "http.response.status", @@ -188,7 +189,7 @@ func GetResponseLogger(ctx Context) Logger { // httpRequestContext makes information about a request available to context. type httpRequestContext struct { - Context + context.Context startedAt time.Time id string @@ -247,7 +248,7 @@ fallback: } type muxVarsContext struct { - Context + context.Context vars map[string]string } @@ -282,7 +283,7 @@ type instrumentedResponseWriterCN struct { // implemented by the parent ResponseWriter. type instrumentedResponseWriter struct { http.ResponseWriter - Context + context.Context mu sync.Mutex status int diff --git a/context/logger.go b/context/logger.go index 86c5964e..afc8860b 100644 --- a/context/logger.go +++ b/context/logger.go @@ -1,10 +1,11 @@ package context import ( + "context" "fmt" + "runtime" "github.com/sirupsen/logrus" - "runtime" ) // Logger provides a leveled-logging interface. 
@@ -40,22 +41,24 @@ type Logger interface { Warnln(args ...interface{}) } +type loggerKey struct{} + // WithLogger creates a new context with provided logger. -func WithLogger(ctx Context, logger Logger) Context { - return WithValue(ctx, "logger", logger) +func WithLogger(ctx context.Context, logger Logger) context.Context { + return context.WithValue(ctx, loggerKey{}, logger) } // GetLoggerWithField returns a logger instance with the specified field key // and value without affecting the context. Extra specified keys will be // resolved from the context. -func GetLoggerWithField(ctx Context, key, value interface{}, keys ...interface{}) Logger { +func GetLoggerWithField(ctx context.Context, key, value interface{}, keys ...interface{}) Logger { return getLogrusLogger(ctx, keys...).WithField(fmt.Sprint(key), value) } // GetLoggerWithFields returns a logger instance with the specified fields // without affecting the context. Extra specified keys will be resolved from // the context. -func GetLoggerWithFields(ctx Context, fields map[interface{}]interface{}, keys ...interface{}) Logger { +func GetLoggerWithFields(ctx context.Context, fields map[interface{}]interface{}, keys ...interface{}) Logger { // must convert from interface{} -> interface{} to string -> interface{} for logrus. lfields := make(logrus.Fields, len(fields)) for key, value := range fields { @@ -71,7 +74,7 @@ func GetLoggerWithFields(ctx Context, fields map[interface{}]interface{}, keys . // argument passed to GetLogger will be passed to fmt.Sprint when expanded as // a logging key field. If context keys are integer constants, for example, // its recommended that a String method is implemented. -func GetLogger(ctx Context, keys ...interface{}) Logger { +func GetLogger(ctx context.Context, keys ...interface{}) Logger { return getLogrusLogger(ctx, keys...) } @@ -79,11 +82,11 @@ func GetLogger(ctx Context, keys ...interface{}) Logger { // are provided, they will be resolved on the context and included in the // logger. Only use this function if specific logrus functionality is // required. -func getLogrusLogger(ctx Context, keys ...interface{}) *logrus.Entry { +func getLogrusLogger(ctx context.Context, keys ...interface{}) *logrus.Entry { var logger *logrus.Entry // Get a logger, if it is present. - loggerInterface := ctx.Value("logger") + loggerInterface := ctx.Value(loggerKey{}) if loggerInterface != nil { if lgr, ok := loggerInterface.(*logrus.Entry); ok { logger = lgr diff --git a/context/trace.go b/context/trace.go index 721964a8..5b88ddaf 100644 --- a/context/trace.go +++ b/context/trace.go @@ -1,6 +1,7 @@ package context import ( + "context" "runtime" "time" @@ -36,7 +37,7 @@ import ( // // Notice that the function name is automatically resolved, along with the // package and a trace id is emitted that can be linked with parent ids. -func WithTrace(ctx Context) (Context, func(format string, a ...interface{})) { +func WithTrace(ctx context.Context) (context.Context, func(format string, a ...interface{})) { if ctx == nil { ctx = Background() } @@ -69,7 +70,7 @@ func WithTrace(ctx Context) (Context, func(format string, a ...interface{})) { // also provides fast lookup for the various attributes that are available on // the trace. 
type traced struct { - Context + context.Context id string parent string start time.Time diff --git a/context/trace_test.go b/context/trace_test.go index 4b969fbb..f973f9a4 100644 --- a/context/trace_test.go +++ b/context/trace_test.go @@ -1,6 +1,7 @@ package context import ( + "context" "runtime" "testing" "time" @@ -35,7 +36,7 @@ func TestWithTrace(t *testing.T) { ctx, done := WithTrace(Background()) defer done("this will be emitted at end of test") - checkContextForValues(t, ctx, append(base, valueTestCase{ + checkContextForValues(ctx, t, append(base, valueTestCase{ key: "trace.func", expected: f.Name(), })) @@ -48,7 +49,7 @@ func TestWithTrace(t *testing.T) { ctx, done := WithTrace(ctx) defer done("this should be subordinate to the other trace") time.Sleep(time.Second) - checkContextForValues(t, ctx, append(base, valueTestCase{ + checkContextForValues(ctx, t, append(base, valueTestCase{ key: "trace.func", expected: f.Name(), }, valueTestCase{ @@ -67,8 +68,7 @@ type valueTestCase struct { notnilorempty bool // just check not empty/not nil } -func checkContextForValues(t *testing.T, ctx Context, values []valueTestCase) { - +func checkContextForValues(ctx context.Context, t *testing.T, values []valueTestCase) { for _, testcase := range values { v := ctx.Value(testcase.key) if testcase.notnilorempty { diff --git a/context/util.go b/context/util.go index cb9ef52e..c462e756 100644 --- a/context/util.go +++ b/context/util.go @@ -1,13 +1,14 @@ package context import ( + "context" "time" ) // Since looks up key, which should be a time.Time, and returns the duration // since that time. If the key is not found, the value returned will be zero. // This is helpful when inferring metrics related to context execution times. -func Since(ctx Context, key interface{}) time.Duration { +func Since(ctx context.Context, key interface{}) time.Duration { if startedAt, ok := ctx.Value(key).(time.Time); ok { return time.Since(startedAt) } @@ -16,7 +17,7 @@ func Since(ctx Context, key interface{}) time.Duration { // GetStringValue returns a string value from the context. The empty string // will be returned if not found. -func GetStringValue(ctx Context, key interface{}) (value string) { +func GetStringValue(ctx context.Context, key interface{}) (value string) { if valuev, ok := ctx.Value(key).(string); ok { value = valuev } diff --git a/context/version.go b/context/version.go index 746cda02..97cf9d66 100644 --- a/context/version.go +++ b/context/version.go @@ -1,16 +1,22 @@ package context +import "context" + +type versionKey struct{} + +func (versionKey) String() string { return "version" } + // WithVersion stores the application version in the context. The new context // gets a logger to ensure log messages are marked with the application // version. -func WithVersion(ctx Context, version string) Context { - ctx = WithValue(ctx, "version", version) +func WithVersion(ctx context.Context, version string) context.Context { + ctx = context.WithValue(ctx, versionKey{}, version) // push a new logger onto the stack - return WithLogger(ctx, GetLogger(ctx, "version")) + return WithLogger(ctx, GetLogger(ctx, versionKey{})) } // GetVersion returns the application version from the context. An empty // string may returned if the version was not set on the context. 
-func GetVersion(ctx Context) string { - return GetStringValue(ctx, "version") +func GetVersion(ctx context.Context) string { + return GetStringValue(ctx, versionKey{}) } diff --git a/contrib/token-server/main.go b/contrib/token-server/main.go index bfbb470b..138793c7 100644 --- a/contrib/token-server/main.go +++ b/contrib/token-server/main.go @@ -1,6 +1,7 @@ package main import ( + "context" "encoding/json" "flag" "math/rand" @@ -9,7 +10,7 @@ import ( "strings" "time" - "github.com/docker/distribution/context" + dcontext "github.com/docker/distribution/context" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/auth" _ "github.com/docker/distribution/registry/auth/htpasswd" @@ -85,7 +86,7 @@ func main() { // TODO: Make configurable issuer.Expiration = 15 * time.Minute - ctx := context.Background() + ctx := dcontext.Background() ts := &tokenServer{ issuer: issuer, @@ -115,23 +116,23 @@ func main() { // request context from a base context. func handlerWithContext(ctx context.Context, handler func(context.Context, http.ResponseWriter, *http.Request)) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctx := context.WithRequest(ctx, r) - logger := context.GetRequestLogger(ctx) - ctx = context.WithLogger(ctx, logger) + ctx := dcontext.WithRequest(ctx, r) + logger := dcontext.GetRequestLogger(ctx) + ctx = dcontext.WithLogger(ctx, logger) handler(ctx, w, r) }) } func handleError(ctx context.Context, err error, w http.ResponseWriter) { - ctx, w = context.WithResponseWriter(ctx, w) + ctx, w = dcontext.WithResponseWriter(ctx, w) if serveErr := errcode.ServeJSON(w, err); serveErr != nil { - context.GetResponseLogger(ctx).Errorf("error sending error response: %v", serveErr) + dcontext.GetResponseLogger(ctx).Errorf("error sending error response: %v", serveErr) return } - context.GetResponseLogger(ctx).Info("application error") + dcontext.GetResponseLogger(ctx).Info("application error") } var refreshCharacters = []rune("0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") @@ -173,13 +174,13 @@ func filterAccessList(ctx context.Context, scope string, requestedAccessList []a for _, access := range requestedAccessList { if access.Type == "repository" { if !strings.HasPrefix(access.Name, scope) { - context.GetLogger(ctx).Debugf("Resource scope not allowed: %s", access.Name) + dcontext.GetLogger(ctx).Debugf("Resource scope not allowed: %s", access.Name) continue } if enforceRepoClass { if class, ok := repositoryClassCache[access.Name]; ok { if class != access.Class { - context.GetLogger(ctx).Debugf("Different repository class: %q, previously %q", access.Class, class) + dcontext.GetLogger(ctx).Debugf("Different repository class: %q, previously %q", access.Class, class) continue } } else if strings.EqualFold(access.Action, "push") { @@ -188,12 +189,12 @@ func filterAccessList(ctx context.Context, scope string, requestedAccessList []a } } else if access.Type == "registry" { if access.Name != "catalog" { - context.GetLogger(ctx).Debugf("Unknown registry resource: %s", access.Name) + dcontext.GetLogger(ctx).Debugf("Unknown registry resource: %s", access.Name) continue } // TODO: Limit some actions to "admin" users } else { - context.GetLogger(ctx).Debugf("Skipping unsupported resource type: %s", access.Type) + dcontext.GetLogger(ctx).Debugf("Skipping unsupported resource type: %s", access.Type) continue } grantedAccessList = append(grantedAccessList, access) @@ -216,7 +217,7 @@ func (grantedAccess) String() string { 
return "grantedAccess" } // getToken handles authenticating the request and authorizing access to the // requested scopes. func (ts *tokenServer) getToken(ctx context.Context, w http.ResponseWriter, r *http.Request) { - context.GetLogger(ctx).Info("getToken") + dcontext.GetLogger(ctx).Info("getToken") params := r.URL.Query() service := params.Get("service") @@ -242,30 +243,30 @@ func (ts *tokenServer) getToken(ctx context.Context, w http.ResponseWriter, r *h } // Get response context. - ctx, w = context.WithResponseWriter(ctx, w) + ctx, w = dcontext.WithResponseWriter(ctx, w) challenge.SetHeaders(w) handleError(ctx, errcode.ErrorCodeUnauthorized.WithDetail(challenge.Error()), w) - context.GetResponseLogger(ctx).Info("get token authentication challenge") + dcontext.GetResponseLogger(ctx).Info("get token authentication challenge") return } ctx = authorizedCtx - username := context.GetStringValue(ctx, "auth.user.name") + username := dcontext.GetStringValue(ctx, "auth.user.name") ctx = context.WithValue(ctx, acctSubject{}, username) - ctx = context.WithLogger(ctx, context.GetLogger(ctx, acctSubject{})) + ctx = dcontext.WithLogger(ctx, dcontext.GetLogger(ctx, acctSubject{})) - context.GetLogger(ctx).Info("authenticated client") + dcontext.GetLogger(ctx).Info("authenticated client") ctx = context.WithValue(ctx, requestedAccess{}, requestedAccessList) - ctx = context.WithLogger(ctx, context.GetLogger(ctx, requestedAccess{})) + ctx = dcontext.WithLogger(ctx, dcontext.GetLogger(ctx, requestedAccess{})) grantedAccessList := filterAccessList(ctx, username, requestedAccessList) ctx = context.WithValue(ctx, grantedAccess{}, grantedAccessList) - ctx = context.WithLogger(ctx, context.GetLogger(ctx, grantedAccess{})) + ctx = dcontext.WithLogger(ctx, dcontext.GetLogger(ctx, grantedAccess{})) token, err := ts.issuer.CreateJWT(username, service, grantedAccessList) if err != nil { @@ -273,7 +274,7 @@ func (ts *tokenServer) getToken(ctx context.Context, w http.ResponseWriter, r *h return } - context.GetLogger(ctx).Info("authorized client") + dcontext.GetLogger(ctx).Info("authorized client") response := tokenResponse{ Token: token, @@ -288,12 +289,12 @@ func (ts *tokenServer) getToken(ctx context.Context, w http.ResponseWriter, r *h } } - ctx, w = context.WithResponseWriter(ctx, w) + ctx, w = dcontext.WithResponseWriter(ctx, w) w.Header().Set("Content-Type", "application/json") json.NewEncoder(w).Encode(response) - context.GetResponseLogger(ctx).Info("get token complete") + dcontext.GetResponseLogger(ctx).Info("get token complete") } type postTokenResponse struct { @@ -378,16 +379,16 @@ func (ts *tokenServer) postToken(ctx context.Context, w http.ResponseWriter, r * } ctx = context.WithValue(ctx, acctSubject{}, subject) - ctx = context.WithLogger(ctx, context.GetLogger(ctx, acctSubject{})) + ctx = dcontext.WithLogger(ctx, dcontext.GetLogger(ctx, acctSubject{})) - context.GetLogger(ctx).Info("authenticated client") + dcontext.GetLogger(ctx).Info("authenticated client") ctx = context.WithValue(ctx, requestedAccess{}, requestedAccessList) - ctx = context.WithLogger(ctx, context.GetLogger(ctx, requestedAccess{})) + ctx = dcontext.WithLogger(ctx, dcontext.GetLogger(ctx, requestedAccess{})) grantedAccessList := filterAccessList(ctx, subject, requestedAccessList) ctx = context.WithValue(ctx, grantedAccess{}, grantedAccessList) - ctx = context.WithLogger(ctx, context.GetLogger(ctx, grantedAccess{})) + ctx = dcontext.WithLogger(ctx, dcontext.GetLogger(ctx, grantedAccess{})) token, err := ts.issuer.CreateJWT(subject, 
service, grantedAccessList) if err != nil { @@ -395,7 +396,7 @@ func (ts *tokenServer) postToken(ctx context.Context, w http.ResponseWriter, r * return } - context.GetLogger(ctx).Info("authorized client") + dcontext.GetLogger(ctx).Info("authorized client") response := postTokenResponse{ Token: token, @@ -416,10 +417,10 @@ func (ts *tokenServer) postToken(ctx context.Context, w http.ResponseWriter, r * response.RefreshToken = rToken } - ctx, w = context.WithResponseWriter(ctx, w) + ctx, w = dcontext.WithResponseWriter(ctx, w) w.Header().Set("Content-Type", "application/json") json.NewEncoder(w).Encode(response) - context.GetResponseLogger(ctx).Info("post token complete") + dcontext.GetResponseLogger(ctx).Info("post token complete") } diff --git a/contrib/token-server/token.go b/contrib/token-server/token.go index b0c2abff..8df5b6a8 100644 --- a/contrib/token-server/token.go +++ b/contrib/token-server/token.go @@ -1,6 +1,7 @@ package main import ( + "context" "crypto" "crypto/rand" "encoding/base64" @@ -11,7 +12,7 @@ import ( "strings" "time" - "github.com/docker/distribution/context" + dcontext "github.com/docker/distribution/context" "github.com/docker/distribution/registry/auth" "github.com/docker/distribution/registry/auth/token" "github.com/docker/libtrust" @@ -27,7 +28,7 @@ func ResolveScopeSpecifiers(ctx context.Context, scopeSpecs []string) []auth.Acc parts := strings.SplitN(scopeSpecifier, ":", 3) if len(parts) != 3 { - context.GetLogger(ctx).Infof("ignoring unsupported scope format %s", scopeSpecifier) + dcontext.GetLogger(ctx).Infof("ignoring unsupported scope format %s", scopeSpecifier) continue } diff --git a/manifest/schema1/config_builder.go b/manifest/schema1/config_builder.go index 9d222566..a96dc3d2 100644 --- a/manifest/schema1/config_builder.go +++ b/manifest/schema1/config_builder.go @@ -1,6 +1,7 @@ package schema1 import ( + "context" "crypto/sha512" "encoding/json" "errors" @@ -8,7 +9,6 @@ import ( "time" "github.com/docker/distribution" - "github.com/docker/distribution/context" "github.com/docker/distribution/manifest" "github.com/docker/distribution/reference" "github.com/docker/libtrust" diff --git a/manifest/schema1/config_builder_test.go b/manifest/schema1/config_builder_test.go index 399d8f31..360febd7 100644 --- a/manifest/schema1/config_builder_test.go +++ b/manifest/schema1/config_builder_test.go @@ -3,12 +3,13 @@ package schema1 import ( "bytes" "compress/gzip" + "context" "io" "reflect" "testing" "github.com/docker/distribution" - "github.com/docker/distribution/context" + dcontext "github.com/docker/distribution/context" "github.com/docker/distribution/reference" "github.com/docker/libtrust" "github.com/opencontainers/go-digest" @@ -214,13 +215,13 @@ func TestConfigBuilder(t *testing.T) { } } - signed, err := builder.Build(context.Background()) + signed, err := builder.Build(dcontext.Background()) if err != nil { t.Fatalf("Build returned error: %v", err) } // Check that the gzipped empty layer tar was put in the blob store - _, err = bs.Stat(context.Background(), digestSHA256GzippedEmptyTar) + _, err = bs.Stat(dcontext.Background(), digestSHA256GzippedEmptyTar) if err != nil { t.Fatal("gzipped empty tar was not put in the blob store") } diff --git a/manifest/schema1/reference_builder.go b/manifest/schema1/reference_builder.go index ae401478..a4f6032c 100644 --- a/manifest/schema1/reference_builder.go +++ b/manifest/schema1/reference_builder.go @@ -1,11 +1,11 @@ package schema1 import ( + "context" + "errors" "fmt" - "errors" 
"github.com/docker/distribution" - "github.com/docker/distribution/context" "github.com/docker/distribution/manifest" "github.com/docker/distribution/reference" "github.com/docker/libtrust" diff --git a/manifest/schema2/builder.go b/manifest/schema2/builder.go index 4b6ba562..3facaae6 100644 --- a/manifest/schema2/builder.go +++ b/manifest/schema2/builder.go @@ -1,8 +1,9 @@ package schema2 import ( + "context" + "github.com/docker/distribution" - "github.com/docker/distribution/context" "github.com/opencontainers/go-digest" ) diff --git a/manifest/schema2/builder_test.go b/manifest/schema2/builder_test.go index 697c1bc9..cde73242 100644 --- a/manifest/schema2/builder_test.go +++ b/manifest/schema2/builder_test.go @@ -1,11 +1,11 @@ package schema2 import ( + "context" "reflect" "testing" "github.com/docker/distribution" - "github.com/docker/distribution/context" "github.com/opencontainers/go-digest" ) diff --git a/manifests.go b/manifests.go index 2c99f25d..1816baea 100644 --- a/manifests.go +++ b/manifests.go @@ -1,10 +1,10 @@ package distribution import ( + "context" "fmt" "mime" - "github.com/docker/distribution/context" "github.com/opencontainers/go-digest" ) diff --git a/notifications/listener.go b/notifications/listener.go index 25b5a800..52ec0ee7 100644 --- a/notifications/listener.go +++ b/notifications/listener.go @@ -1,10 +1,11 @@ package notifications import ( + "context" "net/http" "github.com/docker/distribution" - "github.com/docker/distribution/context" + dcontext "github.com/docker/distribution/context" "github.com/docker/distribution/reference" "github.com/opencontainers/go-digest" ) @@ -70,7 +71,7 @@ func (msl *manifestServiceListener) Delete(ctx context.Context, dgst digest.Dige err := msl.ManifestService.Delete(ctx, dgst) if err == nil { if err := msl.parent.listener.ManifestDeleted(msl.parent.Repository.Named(), dgst); err != nil { - context.GetLogger(ctx).Errorf("error dispatching manifest delete to listener: %v", err) + dcontext.GetLogger(ctx).Errorf("error dispatching manifest delete to listener: %v", err) } } @@ -81,7 +82,7 @@ func (msl *manifestServiceListener) Get(ctx context.Context, dgst digest.Digest, sm, err := msl.ManifestService.Get(ctx, dgst, options...) 
if err == nil { if err := msl.parent.listener.ManifestPulled(msl.parent.Repository.Named(), sm, options...); err != nil { - context.GetLogger(ctx).Errorf("error dispatching manifest pull to listener: %v", err) + dcontext.GetLogger(ctx).Errorf("error dispatching manifest pull to listener: %v", err) } } @@ -93,7 +94,7 @@ func (msl *manifestServiceListener) Put(ctx context.Context, sm distribution.Man if err == nil { if err := msl.parent.listener.ManifestPushed(msl.parent.Repository.Named(), sm, options...); err != nil { - context.GetLogger(ctx).Errorf("error dispatching manifest push to listener: %v", err) + dcontext.GetLogger(ctx).Errorf("error dispatching manifest push to listener: %v", err) } } @@ -111,10 +112,10 @@ func (bsl *blobServiceListener) Get(ctx context.Context, dgst digest.Digest) ([] p, err := bsl.BlobStore.Get(ctx, dgst) if err == nil { if desc, err := bsl.Stat(ctx, dgst); err != nil { - context.GetLogger(ctx).Errorf("error resolving descriptor in ServeBlob listener: %v", err) + dcontext.GetLogger(ctx).Errorf("error resolving descriptor in ServeBlob listener: %v", err) } else { if err := bsl.parent.listener.BlobPulled(bsl.parent.Repository.Named(), desc); err != nil { - context.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err) + dcontext.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err) } } } @@ -126,10 +127,10 @@ func (bsl *blobServiceListener) Open(ctx context.Context, dgst digest.Digest) (d rc, err := bsl.BlobStore.Open(ctx, dgst) if err == nil { if desc, err := bsl.Stat(ctx, dgst); err != nil { - context.GetLogger(ctx).Errorf("error resolving descriptor in ServeBlob listener: %v", err) + dcontext.GetLogger(ctx).Errorf("error resolving descriptor in ServeBlob listener: %v", err) } else { if err := bsl.parent.listener.BlobPulled(bsl.parent.Repository.Named(), desc); err != nil { - context.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err) + dcontext.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err) } } } @@ -141,10 +142,10 @@ func (bsl *blobServiceListener) ServeBlob(ctx context.Context, w http.ResponseWr err := bsl.BlobStore.ServeBlob(ctx, w, r, dgst) if err == nil { if desc, err := bsl.Stat(ctx, dgst); err != nil { - context.GetLogger(ctx).Errorf("error resolving descriptor in ServeBlob listener: %v", err) + dcontext.GetLogger(ctx).Errorf("error resolving descriptor in ServeBlob listener: %v", err) } else { if err := bsl.parent.listener.BlobPulled(bsl.parent.Repository.Named(), desc); err != nil { - context.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err) + dcontext.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err) } } } @@ -156,7 +157,7 @@ func (bsl *blobServiceListener) Put(ctx context.Context, mediaType string, p []b desc, err := bsl.BlobStore.Put(ctx, mediaType, p) if err == nil { if err := bsl.parent.listener.BlobPushed(bsl.parent.Repository.Named(), desc); err != nil { - context.GetLogger(ctx).Errorf("error dispatching layer push to listener: %v", err) + dcontext.GetLogger(ctx).Errorf("error dispatching layer push to listener: %v", err) } } @@ -168,7 +169,7 @@ func (bsl *blobServiceListener) Create(ctx context.Context, options ...distribut switch err := err.(type) { case distribution.ErrBlobMounted: if err := bsl.parent.listener.BlobMounted(bsl.parent.Repository.Named(), err.Descriptor, err.From); err != nil { - context.GetLogger(ctx).Errorf("error dispatching blob mount to listener: %v", err) + 
dcontext.GetLogger(ctx).Errorf("error dispatching blob mount to listener: %v", err) } return nil, err } @@ -179,7 +180,7 @@ func (bsl *blobServiceListener) Delete(ctx context.Context, dgst digest.Digest) err := bsl.BlobStore.Delete(ctx, dgst) if err == nil { if err := bsl.parent.listener.BlobDeleted(bsl.parent.Repository.Named(), dgst); err != nil { - context.GetLogger(ctx).Errorf("error dispatching layer delete to listener: %v", err) + dcontext.GetLogger(ctx).Errorf("error dispatching layer delete to listener: %v", err) } } @@ -207,7 +208,7 @@ func (bwl *blobWriterListener) Commit(ctx context.Context, desc distribution.Des committed, err := bwl.BlobWriter.Commit(ctx, desc) if err == nil { if err := bwl.parent.parent.listener.BlobPushed(bwl.parent.parent.Repository.Named(), committed); err != nil { - context.GetLogger(ctx).Errorf("error dispatching blob push to listener: %v", err) + dcontext.GetLogger(ctx).Errorf("error dispatching blob push to listener: %v", err) } } diff --git a/registry.go b/registry.go index 1da1d533..c34207d0 100644 --- a/registry.go +++ b/registry.go @@ -1,7 +1,8 @@ package distribution import ( - "github.com/docker/distribution/context" + "context" + "github.com/docker/distribution/reference" ) diff --git a/registry/auth/auth.go b/registry/auth/auth.go index 1c9af882..91c7af3f 100644 --- a/registry/auth/auth.go +++ b/registry/auth/auth.go @@ -33,11 +33,10 @@ package auth import ( + "context" "errors" "fmt" "net/http" - - "github.com/docker/distribution/context" ) const ( diff --git a/registry/auth/htpasswd/access.go b/registry/auth/htpasswd/access.go index 819b09ca..8f40913b 100644 --- a/registry/auth/htpasswd/access.go +++ b/registry/auth/htpasswd/access.go @@ -6,13 +6,14 @@ package htpasswd import ( + "context" "fmt" "net/http" "os" "sync" "time" - "github.com/docker/distribution/context" + dcontext "github.com/docker/distribution/context" "github.com/docker/distribution/registry/auth" ) @@ -41,7 +42,7 @@ func newAccessController(options map[string]interface{}) (auth.AccessController, } func (ac *accessController) Authorized(ctx context.Context, accessRecords ...auth.Access) (context.Context, error) { - req, err := context.GetRequest(ctx) + req, err := dcontext.GetRequest(ctx) if err != nil { return nil, err } @@ -83,7 +84,7 @@ func (ac *accessController) Authorized(ctx context.Context, accessRecords ...aut ac.mu.Unlock() if err := localHTPasswd.authenticateUser(username, password); err != nil { - context.GetLogger(ctx).Errorf("error authenticating user %q: %v", username, err) + dcontext.GetLogger(ctx).Errorf("error authenticating user %q: %v", username, err) return nil, &challenge{ realm: ac.realm, err: auth.ErrAuthenticationFailure, diff --git a/registry/auth/silly/access.go b/registry/auth/silly/access.go index 2b801d94..f7bbe6e0 100644 --- a/registry/auth/silly/access.go +++ b/registry/auth/silly/access.go @@ -8,11 +8,12 @@ package silly import ( + "context" "fmt" "net/http" "strings" - "github.com/docker/distribution/context" + dcontext "github.com/docker/distribution/context" "github.com/docker/distribution/registry/auth" ) @@ -43,7 +44,7 @@ func newAccessController(options map[string]interface{}) (auth.AccessController, // Authorized simply checks for the existence of the authorization header, // responding with a bearer challenge if it doesn't exist. 
func (ac *accessController) Authorized(ctx context.Context, accessRecords ...auth.Access) (context.Context, error) { - req, err := context.GetRequest(ctx) + req, err := dcontext.GetRequest(ctx) if err != nil { return nil, err } @@ -65,7 +66,11 @@ func (ac *accessController) Authorized(ctx context.Context, accessRecords ...aut return nil, &challenge } - return auth.WithUser(ctx, auth.UserInfo{Name: "silly"}), nil + ctx = auth.WithUser(ctx, auth.UserInfo{Name: "silly"}) + ctx = dcontext.WithLogger(ctx, dcontext.GetLogger(ctx, auth.UserNameKey, auth.UserKey)) + + return ctx, nil + } type challenge struct { diff --git a/registry/auth/token/accesscontroller.go b/registry/auth/token/accesscontroller.go index 4e8b7f1c..3086c2cf 100644 --- a/registry/auth/token/accesscontroller.go +++ b/registry/auth/token/accesscontroller.go @@ -1,6 +1,7 @@ package token import ( + "context" "crypto" "crypto/x509" "encoding/pem" @@ -11,7 +12,7 @@ import ( "os" "strings" - "github.com/docker/distribution/context" + dcontext "github.com/docker/distribution/context" "github.com/docker/distribution/registry/auth" "github.com/docker/libtrust" ) @@ -221,7 +222,7 @@ func (ac *accessController) Authorized(ctx context.Context, accessItems ...auth. accessSet: newAccessSet(accessItems...), } - req, err := context.GetRequest(ctx) + req, err := dcontext.GetRequest(ctx) if err != nil { return nil, err } diff --git a/registry/client/blob_writer.go b/registry/client/blob_writer.go index e3ffcb00..695bf852 100644 --- a/registry/client/blob_writer.go +++ b/registry/client/blob_writer.go @@ -2,6 +2,7 @@ package client import ( "bytes" + "context" "fmt" "io" "io/ioutil" @@ -9,7 +10,6 @@ import ( "time" "github.com/docker/distribution" - "github.com/docker/distribution/context" ) type httpBlobUpload struct { diff --git a/registry/client/repository.go b/registry/client/repository.go index b82a968e..8bd2c3fb 100644 --- a/registry/client/repository.go +++ b/registry/client/repository.go @@ -2,6 +2,7 @@ package client import ( "bytes" + "context" "encoding/json" "errors" "fmt" @@ -14,7 +15,6 @@ import ( "time" "github.com/docker/distribution" - "github.com/docker/distribution/context" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/client/transport" diff --git a/registry/handlers/api_test.go b/registry/handlers/api_test.go index c8e756de..98dcaa71 100644 --- a/registry/handlers/api_test.go +++ b/registry/handlers/api_test.go @@ -2,6 +2,7 @@ package handlers import ( "bytes" + "context" "encoding/json" "errors" "fmt" @@ -21,7 +22,6 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/configuration" - "github.com/docker/distribution/context" "github.com/docker/distribution/manifest" "github.com/docker/distribution/manifest/manifestlist" "github.com/docker/distribution/manifest/schema1" diff --git a/registry/handlers/app.go b/registry/handlers/app.go index a5af88dc..813bae16 100644 --- a/registry/handlers/app.go +++ b/registry/handlers/app.go @@ -1,6 +1,7 @@ package handlers import ( + "context" cryptorand "crypto/rand" "expvar" "fmt" @@ -16,7 +17,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/configuration" - ctxu "github.com/docker/distribution/context" + dcontext "github.com/docker/distribution/context" "github.com/docker/distribution/health" "github.com/docker/distribution/health/checks" "github.com/docker/distribution/notifications" @@ -37,8 +38,7 @@ import ( "github.com/docker/libtrust" 
"github.com/garyburd/redigo/redis" "github.com/gorilla/mux" - log "github.com/sirupsen/logrus" - "golang.org/x/net/context" + "github.com/sirupsen/logrus" ) // randomSecretSize is the number of random bytes to generate if no secret @@ -145,7 +145,7 @@ func NewApp(ctx context.Context, config *configuration.Configuration) *App { } } - startUploadPurger(app, app.driver, ctxu.GetLogger(app), purgeConfig) + startUploadPurger(app, app.driver, dcontext.GetLogger(app), purgeConfig) app.driver, err = applyStorageMiddleware(app.driver, config.Middleware["storage"]) if err != nil { @@ -208,7 +208,7 @@ func NewApp(ctx context.Context, config *configuration.Configuration) *App { } } if redirectDisabled { - ctxu.GetLogger(app).Infof("backend redirection disabled") + dcontext.GetLogger(app).Infof("backend redirection disabled") } else { options = append(options, storage.EnableRedirect) } @@ -269,7 +269,7 @@ func NewApp(ctx context.Context, config *configuration.Configuration) *App { if err != nil { panic("could not create registry: " + err.Error()) } - ctxu.GetLogger(app).Infof("using redis blob descriptor cache") + dcontext.GetLogger(app).Infof("using redis blob descriptor cache") case "inmemory": cacheProvider := memorycache.NewInMemoryBlobDescriptorCacheProvider() localOptions := append(options, storage.BlobDescriptorCacheProvider(cacheProvider)) @@ -277,10 +277,10 @@ func NewApp(ctx context.Context, config *configuration.Configuration) *App { if err != nil { panic("could not create registry: " + err.Error()) } - ctxu.GetLogger(app).Infof("using inmemory blob descriptor cache") + dcontext.GetLogger(app).Infof("using inmemory blob descriptor cache") default: if v != "" { - ctxu.GetLogger(app).Warnf("unknown cache type %q, caching disabled", config.Storage["cache"]) + dcontext.GetLogger(app).Warnf("unknown cache type %q, caching disabled", config.Storage["cache"]) } } } @@ -306,7 +306,7 @@ func NewApp(ctx context.Context, config *configuration.Configuration) *App { panic(fmt.Sprintf("unable to configure authorization (%s): %v", authType, err)) } app.accessController = accessController - ctxu.GetLogger(app).Debugf("configured %q access controller", authType) + dcontext.GetLogger(app).Debugf("configured %q access controller", authType) } // configure as a pull through cache @@ -316,7 +316,7 @@ func NewApp(ctx context.Context, config *configuration.Configuration) *App { panic(err.Error()) } app.isCache = true - ctxu.GetLogger(app).Info("Registry configured as a proxy cache to ", config.Proxy.RemoteURL) + dcontext.GetLogger(app).Info("Registry configured as a proxy cache to ", config.Proxy.RemoteURL) } return app @@ -361,7 +361,7 @@ func (app *App) RegisterHealthChecks(healthRegistries ...*health.Registry) { if interval == 0 { interval = defaultCheckInterval } - ctxu.GetLogger(app).Infof("configuring file health check path=%s, interval=%d", fileChecker.File, interval/time.Second) + dcontext.GetLogger(app).Infof("configuring file health check path=%s, interval=%d", fileChecker.File, interval/time.Second) healthRegistry.Register(fileChecker.File, health.PeriodicChecker(checks.FileChecker(fileChecker.File), interval)) } @@ -379,10 +379,10 @@ func (app *App) RegisterHealthChecks(healthRegistries ...*health.Registry) { checker := checks.HTTPChecker(httpChecker.URI, statusCode, httpChecker.Timeout, httpChecker.Headers) if httpChecker.Threshold != 0 { - ctxu.GetLogger(app).Infof("configuring HTTP health check uri=%s, interval=%d, threshold=%d", httpChecker.URI, interval/time.Second, httpChecker.Threshold) + 
dcontext.GetLogger(app).Infof("configuring HTTP health check uri=%s, interval=%d, threshold=%d", httpChecker.URI, interval/time.Second, httpChecker.Threshold) healthRegistry.Register(httpChecker.URI, health.PeriodicThresholdChecker(checker, interval, httpChecker.Threshold)) } else { - ctxu.GetLogger(app).Infof("configuring HTTP health check uri=%s, interval=%d", httpChecker.URI, interval/time.Second) + dcontext.GetLogger(app).Infof("configuring HTTP health check uri=%s, interval=%d", httpChecker.URI, interval/time.Second) healthRegistry.Register(httpChecker.URI, health.PeriodicChecker(checker, interval)) } } @@ -396,10 +396,10 @@ func (app *App) RegisterHealthChecks(healthRegistries ...*health.Registry) { checker := checks.TCPChecker(tcpChecker.Addr, tcpChecker.Timeout) if tcpChecker.Threshold != 0 { - ctxu.GetLogger(app).Infof("configuring TCP health check addr=%s, interval=%d, threshold=%d", tcpChecker.Addr, interval/time.Second, tcpChecker.Threshold) + dcontext.GetLogger(app).Infof("configuring TCP health check addr=%s, interval=%d, threshold=%d", tcpChecker.Addr, interval/time.Second, tcpChecker.Threshold) healthRegistry.Register(tcpChecker.Addr, health.PeriodicThresholdChecker(checker, interval, tcpChecker.Threshold)) } else { - ctxu.GetLogger(app).Infof("configuring TCP health check addr=%s, interval=%d", tcpChecker.Addr, interval/time.Second) + dcontext.GetLogger(app).Infof("configuring TCP health check addr=%s, interval=%d", tcpChecker.Addr, interval/time.Second) healthRegistry.Register(tcpChecker.Addr, health.PeriodicChecker(checker, interval)) } } @@ -425,11 +425,11 @@ func (app *App) configureEvents(configuration *configuration.Configuration) { var sinks []notifications.Sink for _, endpoint := range configuration.Notifications.Endpoints { if endpoint.Disabled { - ctxu.GetLogger(app).Infof("endpoint %s disabled, skipping", endpoint.Name) + dcontext.GetLogger(app).Infof("endpoint %s disabled, skipping", endpoint.Name) continue } - ctxu.GetLogger(app).Infof("configuring endpoint %v (%v), timeout=%s, headers=%v", endpoint.Name, endpoint.URL, endpoint.Timeout, endpoint.Headers) + dcontext.GetLogger(app).Infof("configuring endpoint %v (%v), timeout=%s, headers=%v", endpoint.Name, endpoint.URL, endpoint.Timeout, endpoint.Headers) endpoint := notifications.NewEndpoint(endpoint.Name, endpoint.URL, notifications.EndpointConfig{ Timeout: endpoint.Timeout, Threshold: endpoint.Threshold, @@ -461,7 +461,7 @@ func (app *App) configureEvents(configuration *configuration.Configuration) { app.events.source = notifications.SourceRecord{ Addr: hostname, - InstanceID: ctxu.GetStringValue(app, "instance.id"), + InstanceID: dcontext.GetStringValue(app, "instance.id"), } } @@ -469,7 +469,7 @@ type redisStartAtKey struct{} func (app *App) configureRedis(configuration *configuration.Configuration) { if configuration.Redis.Addr == "" { - ctxu.GetLogger(app).Infof("redis not configured") + dcontext.GetLogger(app).Infof("redis not configured") return } @@ -479,8 +479,8 @@ func (app *App) configureRedis(configuration *configuration.Configuration) { ctx := context.WithValue(app, redisStartAtKey{}, time.Now()) done := func(err error) { - logger := ctxu.GetLoggerWithField(ctx, "redis.connect.duration", - ctxu.Since(ctx, redisStartAtKey{})) + logger := dcontext.GetLoggerWithField(ctx, "redis.connect.duration", + dcontext.Since(ctx, redisStartAtKey{})) if err != nil { logger.Errorf("redis: error connecting: %v", err) } else { @@ -494,7 +494,7 @@ func (app *App) configureRedis(configuration 
*configuration.Configuration) { configuration.Redis.ReadTimeout, configuration.Redis.WriteTimeout) if err != nil { - ctxu.GetLogger(app).Errorf("error connecting to redis instance %s: %v", + dcontext.GetLogger(app).Errorf("error connecting to redis instance %s: %v", configuration.Redis.Addr, err) done(err) return nil, err @@ -551,7 +551,7 @@ func (app *App) configureRedis(configuration *configuration.Configuration) { // configureLogHook prepares logging hook parameters. func (app *App) configureLogHook(configuration *configuration.Configuration) { - entry, ok := ctxu.GetLogger(app).(*log.Entry) + entry, ok := dcontext.GetLogger(app).(*logrus.Entry) if !ok { // somehow, we are not using logrus return @@ -589,7 +589,7 @@ func (app *App) configureSecret(configuration *configuration.Configuration) { panic(fmt.Sprintf("could not generate random bytes for HTTP secret: %v", err)) } configuration.HTTP.Secret = string(secretBytes[:]) - ctxu.GetLogger(app).Warn("No HTTP secret provided - generated random secret. This may cause problems with uploads if multiple registries are behind a load-balancer. To provide a shared secret, fill in http.secret in the configuration file or set the REGISTRY_HTTP_SECRET environment variable.") + dcontext.GetLogger(app).Warn("No HTTP secret provided - generated random secret. This may cause problems with uploads if multiple registries are behind a load-balancer. To provide a shared secret, fill in http.secret in the configuration file or set the REGISTRY_HTTP_SECRET environment variable.") } } @@ -598,15 +598,15 @@ func (app *App) ServeHTTP(w http.ResponseWriter, r *http.Request) { // Prepare the context with our own little decorations. ctx := r.Context() - ctx = ctxu.WithRequest(ctx, r) - ctx, w = ctxu.WithResponseWriter(ctx, w) - ctx = ctxu.WithLogger(ctx, ctxu.GetRequestLogger(ctx)) + ctx = dcontext.WithRequest(ctx, r) + ctx, w = dcontext.WithResponseWriter(ctx, w) + ctx = dcontext.WithLogger(ctx, dcontext.GetRequestLogger(ctx)) r = r.WithContext(ctx) defer func() { status, ok := ctx.Value("http.response.status").(int) if ok && status >= 200 && status <= 399 { - ctxu.GetResponseLogger(r.Context()).Infof("response completed") + dcontext.GetResponseLogger(r.Context()).Infof("response completed") } }() @@ -637,12 +637,12 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { context := app.context(w, r) if err := app.authorized(w, r, context); err != nil { - ctxu.GetLogger(context).Warnf("error authorizing context: %v", err) + dcontext.GetLogger(context).Warnf("error authorizing context: %v", err) return } // Add username to request logging - context.Context = ctxu.WithLogger(context.Context, ctxu.GetLogger(context.Context, auth.UserNameKey)) + context.Context = dcontext.WithLogger(context.Context, dcontext.GetLogger(context.Context, auth.UserNameKey)) // sync up context on the request. 
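The ServeHTTP and dispatcher changes above share one shape: decorate the request's own context, then hand it back with r.WithContext so everything downstream sees the decorations. A stdlib-only sketch of that shape (hypothetical middleware, not code from this patch):

```
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func middleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx := r.Context()
		// ...decorate ctx here; the patch attaches request and response loggers...
		next.ServeHTTP(w, r.WithContext(ctx)) // reattach the decorated context
	})
}

func main() {
	h := middleware(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "ok")
	}))
	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, httptest.NewRequest("GET", "/", nil))
	fmt.Println(rec.Body.String()) // ok
}
```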
r = r.WithContext(context) @@ -650,20 +650,20 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { if app.nameRequired(r) { nameRef, err := reference.WithName(getName(context)) if err != nil { - ctxu.GetLogger(context).Errorf("error parsing reference from context: %v", err) + dcontext.GetLogger(context).Errorf("error parsing reference from context: %v", err) context.Errors = append(context.Errors, distribution.ErrRepositoryNameInvalid{ Name: getName(context), Reason: err, }) if err := errcode.ServeJSON(w, context.Errors); err != nil { - ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) + dcontext.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) } return } repository, err := app.registry.Repository(context, nameRef) if err != nil { - ctxu.GetLogger(context).Errorf("error resolving repository: %v", err) + dcontext.GetLogger(context).Errorf("error resolving repository: %v", err) switch err := err.(type) { case distribution.ErrRepositoryUnknown: @@ -675,7 +675,7 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { } if err := errcode.ServeJSON(w, context.Errors); err != nil { - ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) + dcontext.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) } return } @@ -687,11 +687,11 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { context.Repository, err = applyRepoMiddleware(app, context.Repository, app.Config.Middleware["repository"]) if err != nil { - ctxu.GetLogger(context).Errorf("error initializing repository middleware: %v", err) + dcontext.GetLogger(context).Errorf("error initializing repository middleware: %v", err) context.Errors = append(context.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) if err := errcode.ServeJSON(w, context.Errors); err != nil { - ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) + dcontext.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) } return } @@ -703,7 +703,7 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { // for layer upload). 
if context.Errors.Len() > 0 { if err := errcode.ServeJSON(w, context.Errors); err != nil { - ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) + dcontext.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) } app.logError(context, context.Errors) @@ -723,31 +723,31 @@ type errDetailKey struct{} func (errDetailKey) String() string { return "err.detail" } -func (app *App) logError(context context.Context, errors errcode.Errors) { +func (app *App) logError(ctx context.Context, errors errcode.Errors) { for _, e1 := range errors { - var c ctxu.Context + var c context.Context switch e1.(type) { case errcode.Error: e, _ := e1.(errcode.Error) - c = ctxu.WithValue(context, errCodeKey{}, e.Code) - c = ctxu.WithValue(c, errMessageKey{}, e.Code.Message()) - c = ctxu.WithValue(c, errDetailKey{}, e.Detail) + c = context.WithValue(ctx, errCodeKey{}, e.Code) + c = context.WithValue(c, errMessageKey{}, e.Code.Message()) + c = context.WithValue(c, errDetailKey{}, e.Detail) case errcode.ErrorCode: e, _ := e1.(errcode.ErrorCode) - c = ctxu.WithValue(context, errCodeKey{}, e) - c = ctxu.WithValue(c, errMessageKey{}, e.Message()) + c = context.WithValue(ctx, errCodeKey{}, e) + c = context.WithValue(c, errMessageKey{}, e.Message()) default: // just normal go 'error' - c = ctxu.WithValue(context, errCodeKey{}, errcode.ErrorCodeUnknown) - c = ctxu.WithValue(c, errMessageKey{}, e1.Error()) + c = context.WithValue(ctx, errCodeKey{}, errcode.ErrorCodeUnknown) + c = context.WithValue(c, errMessageKey{}, e1.Error()) } - c = ctxu.WithLogger(c, ctxu.GetLogger(c, + c = dcontext.WithLogger(c, dcontext.GetLogger(c, errCodeKey{}, errMessageKey{}, errDetailKey{})) - ctxu.GetResponseLogger(c).Errorf("response completed with error") + dcontext.GetResponseLogger(c).Errorf("response completed with error") } } @@ -755,8 +755,8 @@ func (app *App) logError(context context.Context, errors errcode.Errors) { // called once per request. func (app *App) context(w http.ResponseWriter, r *http.Request) *Context { ctx := r.Context() - ctx = ctxu.WithVars(ctx, r) - ctx = ctxu.WithLogger(ctx, ctxu.GetLogger(ctx, + ctx = dcontext.WithVars(ctx, r) + ctx = dcontext.WithLogger(ctx, dcontext.GetLogger(ctx, "vars.name", "vars.reference", "vars.digest", @@ -783,7 +783,7 @@ func (app *App) context(w http.ResponseWriter, r *http.Request) *Context { // repository. If it succeeds, the context may access the requested // repository. An error will be returned if access is not available. func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Context) error { - ctxu.GetLogger(context).Debug("authorizing request") + dcontext.GetLogger(context).Debug("authorizing request") repo := getName(context) if app.accessController == nil { @@ -809,7 +809,7 @@ func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Cont // that mistake elsewhere in the code, allowing any operation to // proceed. 
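The logError rewrite above renames its first parameter from context to ctx for a concrete reason: once the standard library package is imported, a parameter named context shadows it, and expressions like context.WithValue stop resolving. A minimal illustration (hypothetical names, stdlib only):

```
package main

import (
	"context"
	"fmt"
)

type key struct{}

// With the parameter named ctx, the package name stays visible:
func good(ctx context.Context) context.Context {
	return context.WithValue(ctx, key{}, "ok")
}

// func bad(context context.Context) {
//	context.WithValue(context, key{}, "ok")
//	// error: context.WithValue undefined (the parameter shadows the package)
// }

func main() {
	fmt.Println(good(context.Background()).Value(key{})) // ok
}
```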
if err := errcode.ServeJSON(w, errcode.ErrorCodeUnauthorized); err != nil { - ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) + dcontext.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) } return fmt.Errorf("forbidden: no repository name") } @@ -824,20 +824,21 @@ func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Cont err.SetHeaders(w) if err := errcode.ServeJSON(w, errcode.ErrorCodeUnauthorized.WithDetail(accessRecords)); err != nil { - ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) + dcontext.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) } default: // This condition is a potential security problem either in // the configuration or whatever is backing the access // controller. Just return a bad request with no information // to avoid exposure. The request should not proceed. - ctxu.GetLogger(context).Errorf("error checking authorization: %v", err) + dcontext.GetLogger(context).Errorf("error checking authorization: %v", err) w.WriteHeader(http.StatusBadRequest) } return err } + dcontext.GetLogger(ctx).Info("authorized request") // TODO(stevvooe): This pattern needs to be cleaned up a bit. One context // should be replaced by another, rather than replacing the context on a // mutable object. @@ -851,7 +852,7 @@ func (app *App) eventBridge(ctx *Context, r *http.Request) notifications.Listene actor := notifications.ActorRecord{ Name: getUserName(ctx, r), } - request := notifications.NewRequestRecord(ctxu.GetRequestID(ctx), r) + request := notifications.NewRequestRecord(dcontext.GetRequestID(ctx), r) return notifications.NewBridge(ctx.urlBuilder, app.events.source, actor, request, app.events.sink) } @@ -986,7 +987,7 @@ func badPurgeUploadConfig(reason string) { // startUploadPurger schedules a goroutine which will periodically // check upload directories for old files and delete them -func startUploadPurger(ctx context.Context, storageDriver storagedriver.StorageDriver, log ctxu.Logger, config map[interface{}]interface{}) { +func startUploadPurger(ctx context.Context, storageDriver storagedriver.StorageDriver, log dcontext.Logger, config map[interface{}]interface{}) { if config["enabled"] == false { return } diff --git a/registry/handlers/blobupload.go b/registry/handlers/blobupload.go index 0b39fff5..49ab1aaa 100644 --- a/registry/handlers/blobupload.go +++ b/registry/handlers/blobupload.go @@ -6,7 +6,7 @@ import ( "net/url" "github.com/docker/distribution" - ctxu "github.com/docker/distribution/context" + dcontext "github.com/docker/distribution/context" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" @@ -39,7 +39,7 @@ func blobUploadDispatcher(ctx *Context, r *http.Request) http.Handler { state, err := hmacKey(ctx.Config.HTTP.Secret).unpackUploadState(r.FormValue("_state")) if err != nil { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctxu.GetLogger(ctx).Infof("error resolving upload: %v", err) + dcontext.GetLogger(ctx).Infof("error resolving upload: %v", err) buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) }) } @@ -47,14 +47,14 @@ func blobUploadDispatcher(ctx *Context, r *http.Request) http.Handler { if state.Name != ctx.Repository.Named().Name() { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - 
ctxu.GetLogger(ctx).Infof("mismatched repository name in upload state: %q != %q", state.Name, buh.Repository.Named().Name()) + dcontext.GetLogger(ctx).Infof("mismatched repository name in upload state: %q != %q", state.Name, buh.Repository.Named().Name()) buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) }) } if state.UUID != buh.UUID { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctxu.GetLogger(ctx).Infof("mismatched uuid in upload state: %q != %q", state.UUID, buh.UUID) + dcontext.GetLogger(ctx).Infof("mismatched uuid in upload state: %q != %q", state.UUID, buh.UUID) buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) }) } @@ -62,7 +62,7 @@ func blobUploadDispatcher(ctx *Context, r *http.Request) http.Handler { blobs := ctx.Repository.Blobs(buh) upload, err := blobs.Resume(buh, buh.UUID) if err != nil { - ctxu.GetLogger(ctx).Errorf("error resolving upload: %v", err) + dcontext.GetLogger(ctx).Errorf("error resolving upload: %v", err) if err == distribution.ErrBlobUploadUnknown { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown.WithDetail(err)) @@ -77,7 +77,7 @@ func blobUploadDispatcher(ctx *Context, r *http.Request) http.Handler { if size := upload.Size(); size != buh.State.Offset { defer upload.Close() - ctxu.GetLogger(ctx).Errorf("upload resumed at wrong offest: %d != %d", size, buh.State.Offset) + dcontext.GetLogger(ctx).Errorf("upload resumed at wrong offest: %d != %d", size, buh.State.Offset) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) upload.Cancel(buh) @@ -179,7 +179,7 @@ func (buh *blobUploadHandler) PatchBlobData(w http.ResponseWriter, r *http.Reque // TODO(dmcgowan): support Content-Range header to seek and write range - if err := copyFullPayload(w, r, buh.Upload, -1, buh, "blob PATCH"); err != nil { + if err := copyFullPayload(buh, w, r, buh.Upload, -1, "blob PATCH"); err != nil { buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err.Error())) return } @@ -218,7 +218,7 @@ func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *ht return } - if err := copyFullPayload(w, r, buh.Upload, -1, buh, "blob PUT"); err != nil { + if err := copyFullPayload(buh, w, r, buh.Upload, -1, "blob PUT"); err != nil { buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err.Error())) return } @@ -246,7 +246,7 @@ func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *ht case distribution.ErrBlobInvalidLength, distribution.ErrBlobDigestUnsupported: buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) default: - ctxu.GetLogger(buh).Errorf("unknown error completing upload: %v", err) + dcontext.GetLogger(buh).Errorf("unknown error completing upload: %v", err) buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) } @@ -255,7 +255,7 @@ func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *ht // Clean up the backend blob data if there was an error. if err := buh.Upload.Cancel(buh); err != nil { // If the cleanup fails, all we can do is observe and report. 
- ctxu.GetLogger(buh).Errorf("error canceling upload after error: %v", err) + dcontext.GetLogger(buh).Errorf("error canceling upload after error: %v", err) } return @@ -275,7 +275,7 @@ func (buh *blobUploadHandler) CancelBlobUpload(w http.ResponseWriter, r *http.Re w.Header().Set("Docker-Upload-UUID", buh.UUID) if err := buh.Upload.Cancel(buh); err != nil { - ctxu.GetLogger(buh).Errorf("error encountered canceling upload: %v", err) + dcontext.GetLogger(buh).Errorf("error encountered canceling upload: %v", err) buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) } @@ -297,7 +297,7 @@ func (buh *blobUploadHandler) blobUploadResponse(w http.ResponseWriter, r *http. token, err := hmacKey(buh.Config.HTTP.Secret).packUploadState(buh.State) if err != nil { - ctxu.GetLogger(buh).Infof("error building upload state token: %s", err) + dcontext.GetLogger(buh).Infof("error building upload state token: %s", err) return err } @@ -307,7 +307,7 @@ func (buh *blobUploadHandler) blobUploadResponse(w http.ResponseWriter, r *http. "_state": []string{token}, }) if err != nil { - ctxu.GetLogger(buh).Infof("error building upload url: %s", err) + dcontext.GetLogger(buh).Infof("error building upload url: %s", err) return err } diff --git a/registry/handlers/context.go b/registry/handlers/context.go index 6c1be5b3..a775be7f 100644 --- a/registry/handlers/context.go +++ b/registry/handlers/context.go @@ -5,7 +5,7 @@ import ( "net/http" "github.com/docker/distribution" - ctxu "github.com/docker/distribution/context" + dcontext "github.com/docker/distribution/context" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/auth" @@ -44,26 +44,26 @@ func (ctx *Context) Value(key interface{}) interface{} { } func getName(ctx context.Context) (name string) { - return ctxu.GetStringValue(ctx, "vars.name") + return dcontext.GetStringValue(ctx, "vars.name") } func getReference(ctx context.Context) (reference string) { - return ctxu.GetStringValue(ctx, "vars.reference") + return dcontext.GetStringValue(ctx, "vars.reference") } var errDigestNotAvailable = fmt.Errorf("digest not available in context") func getDigest(ctx context.Context) (dgst digest.Digest, err error) { - dgstStr := ctxu.GetStringValue(ctx, "vars.digest") + dgstStr := dcontext.GetStringValue(ctx, "vars.digest") if dgstStr == "" { - ctxu.GetLogger(ctx).Errorf("digest not available") + dcontext.GetLogger(ctx).Errorf("digest not available") return "", errDigestNotAvailable } d, err := digest.Parse(dgstStr) if err != nil { - ctxu.GetLogger(ctx).Errorf("error parsing digest=%q: %v", dgstStr, err) + dcontext.GetLogger(ctx).Errorf("error parsing digest=%q: %v", dgstStr, err) return "", err } @@ -71,13 +71,13 @@ func getDigest(ctx context.Context) (dgst digest.Digest, err error) { } func getUploadUUID(ctx context.Context) (uuid string) { - return ctxu.GetStringValue(ctx, "vars.uuid") + return dcontext.GetStringValue(ctx, "vars.uuid") } // getUserName attempts to resolve a username from the context and request. If // a username cannot be resolved, the empty string is returned. 
func getUserName(ctx context.Context, r *http.Request) string { - username := ctxu.GetStringValue(ctx, auth.UserNameKey) + username := dcontext.GetStringValue(ctx, auth.UserNameKey) // Fallback to request user with basic auth if username == "" { diff --git a/registry/handlers/helpers.go b/registry/handlers/helpers.go index dc3091a9..46ad797e 100644 --- a/registry/handlers/helpers.go +++ b/registry/handlers/helpers.go @@ -1,11 +1,12 @@ package handlers import ( + "context" "errors" "io" "net/http" - ctxu "github.com/docker/distribution/context" + dcontext "github.com/docker/distribution/context" ) // closeResources closes all the provided resources after running the target @@ -24,13 +25,13 @@ func closeResources(handler http.Handler, closers ...io.Closer) http.Handler { // upload, it avoids sending a 400 error to keep the logs cleaner. // // The copy will be limited to `limit` bytes, if limit is greater than zero. -func copyFullPayload(responseWriter http.ResponseWriter, r *http.Request, destWriter io.Writer, limit int64, context ctxu.Context, action string) error { +func copyFullPayload(ctx context.Context, responseWriter http.ResponseWriter, r *http.Request, destWriter io.Writer, limit int64, action string) error { // Get a channel that tells us if the client disconnects var clientClosed <-chan bool if notifier, ok := responseWriter.(http.CloseNotifier); ok { clientClosed = notifier.CloseNotify() } else { - ctxu.GetLogger(context).Warnf("the ResponseWriter does not implement CloseNotifier (type: %T)", responseWriter) + dcontext.GetLogger(ctx).Warnf("the ResponseWriter does not implement CloseNotifier (type: %T)", responseWriter) } var body = r.Body @@ -52,7 +53,7 @@ func copyFullPayload(responseWriter http.ResponseWriter, r *http.Request, destWr // instead of showing 0 for the HTTP status. responseWriter.WriteHeader(499) - ctxu.GetLoggerWithFields(context, map[interface{}]interface{}{ + dcontext.GetLoggerWithFields(ctx, map[interface{}]interface{}{ "error": err, "copied": copied, "contentLength": r.ContentLength, @@ -63,7 +64,7 @@ func copyFullPayload(responseWriter http.ResponseWriter, r *http.Request, destWr } if err != nil { - ctxu.GetLogger(context).Errorf("unknown error reading request payload: %v", err) + dcontext.GetLogger(ctx).Errorf("unknown error reading request payload: %v", err) return err } diff --git a/registry/handlers/manifests.go b/registry/handlers/manifests.go index bbbc10fa..4e1d9868 100644 --- a/registry/handlers/manifests.go +++ b/registry/handlers/manifests.go @@ -7,7 +7,7 @@ import ( "strings" "github.com/docker/distribution" - ctxu "github.com/docker/distribution/context" + dcontext "github.com/docker/distribution/context" "github.com/docker/distribution/manifest/manifestlist" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/manifest/schema2" @@ -66,7 +66,7 @@ type manifestHandler struct { // GetManifest fetches the image manifest from the storage backend, if it exists. func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request) { - ctxu.GetLogger(imh).Debug("GetImageManifest") + dcontext.GetLogger(imh).Debug("GetImageManifest") manifests, err := imh.Repository.Manifests(imh) if err != nil { imh.Errors = append(imh.Errors, err) @@ -143,7 +143,7 @@ func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request) // matching the digest. 
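The copyFullPayload change above is purely positional: the context argument moves to the front, matching the Go convention that a context.Context is the first parameter of a function. A generic sketch of the same convention (hypothetical helper, not the registry's function):

```
package main

import (
	"context"
	"fmt"
	"io"
	"strings"
)

// Context first, then destination, then source: the argument order the
// patch now applies to copyFullPayload at every call site.
func copyWithContext(ctx context.Context, dst io.Writer, src io.Reader) (int64, error) {
	if err := ctx.Err(); err != nil { // honor cancellation before copying
		return 0, err
	}
	return io.Copy(dst, src)
}

func main() {
	var sb strings.Builder
	n, err := copyWithContext(context.Background(), &sb, strings.NewReader("payload"))
	fmt.Println(n, err, sb.String()) // 7 <nil> payload
}
```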
if imh.Tag != "" && isSchema2 && !supportsSchema2 { // Rewrite manifest in schema1 format - ctxu.GetLogger(imh).Infof("rewriting manifest %s in schema1 format to support old client", imh.Digest.String()) + dcontext.GetLogger(imh).Infof("rewriting manifest %s in schema1 format to support old client", imh.Digest.String()) manifest, err = imh.convertSchema2Manifest(schema2Manifest) if err != nil { @@ -151,7 +151,7 @@ func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request) } } else if imh.Tag != "" && isManifestList && !supportsManifestList { // Rewrite manifest in schema1 format - ctxu.GetLogger(imh).Infof("rewriting manifest list %s in schema1 format to support old client", imh.Digest.String()) + dcontext.GetLogger(imh).Infof("rewriting manifest list %s in schema1 format to support old client", imh.Digest.String()) // Find the image manifest corresponding to the default // platform @@ -252,7 +252,7 @@ func etagMatch(r *http.Request, etag string) bool { // PutManifest validates and stores a manifest in the registry. func (imh *manifestHandler) PutManifest(w http.ResponseWriter, r *http.Request) { - ctxu.GetLogger(imh).Debug("PutImageManifest") + dcontext.GetLogger(imh).Debug("PutImageManifest") manifests, err := imh.Repository.Manifests(imh) if err != nil { imh.Errors = append(imh.Errors, err) @@ -260,7 +260,7 @@ func (imh *manifestHandler) PutManifest(w http.ResponseWriter, r *http.Request) } var jsonBuf bytes.Buffer - if err := copyFullPayload(w, r, &jsonBuf, maxManifestBodySize, imh, "image manifest PUT"); err != nil { + if err := copyFullPayload(imh, w, r, &jsonBuf, maxManifestBodySize, "image manifest PUT"); err != nil { // copyFullPayload reports the error if necessary imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err.Error())) return @@ -275,7 +275,7 @@ func (imh *manifestHandler) PutManifest(w http.ResponseWriter, r *http.Request) if imh.Digest != "" { if desc.Digest != imh.Digest { - ctxu.GetLogger(imh).Errorf("payload digest does match: %q != %q", desc.Digest, imh.Digest) + dcontext.GetLogger(imh).Errorf("payload digest does match: %q != %q", desc.Digest, imh.Digest) imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid) return } @@ -358,7 +358,7 @@ func (imh *manifestHandler) PutManifest(w http.ResponseWriter, r *http.Request) // NOTE(stevvooe): Given the behavior above, this absurdly unlikely to // happen. We'll log the error here but proceed as if it worked. Worst // case, we set an empty location header. - ctxu.GetLogger(imh).Errorf("error building manifest url from digest: %v", err) + dcontext.GetLogger(imh).Errorf("error building manifest url from digest: %v", err) } w.Header().Set("Location", location) @@ -435,7 +435,7 @@ func (imh *manifestHandler) applyResourcePolicy(manifest distribution.Manifest) // DeleteManifest removes the manifest with the given digest from the registry. 
func (imh *manifestHandler) DeleteManifest(w http.ResponseWriter, r *http.Request) { - ctxu.GetLogger(imh).Debug("DeleteImageManifest") + dcontext.GetLogger(imh).Debug("DeleteImageManifest") manifests, err := imh.Repository.Manifests(imh) if err != nil { diff --git a/registry/middleware/registry/middleware.go b/registry/middleware/registry/middleware.go index 3e6e5cc7..49defd82 100644 --- a/registry/middleware/registry/middleware.go +++ b/registry/middleware/registry/middleware.go @@ -1,10 +1,10 @@ package middleware import ( + "context" "fmt" "github.com/docker/distribution" - "github.com/docker/distribution/context" "github.com/docker/distribution/registry/storage" ) diff --git a/registry/middleware/repository/middleware.go b/registry/middleware/repository/middleware.go index 27b42aec..f1554b70 100644 --- a/registry/middleware/repository/middleware.go +++ b/registry/middleware/repository/middleware.go @@ -1,10 +1,10 @@ package middleware import ( + "context" "fmt" "github.com/docker/distribution" - "github.com/docker/distribution/context" ) // InitFunc is the type of a RepositoryMiddleware factory function and is diff --git a/registry/proxy/proxyblobstore.go b/registry/proxy/proxyblobstore.go index c3f1b92f..0f2c7e39 100644 --- a/registry/proxy/proxyblobstore.go +++ b/registry/proxy/proxyblobstore.go @@ -1,6 +1,7 @@ package proxy import ( + "context" "io" "net/http" "strconv" @@ -8,7 +9,7 @@ import ( "time" "github.com/docker/distribution" - "github.com/docker/distribution/context" + dcontext "github.com/docker/distribution/context" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/proxy/scheduler" "github.com/opencontainers/go-digest" @@ -116,7 +117,7 @@ func (pbs *proxyBlobStore) storeLocal(ctx context.Context, dgst digest.Digest) e func (pbs *proxyBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { served, err := pbs.serveLocal(ctx, w, r, dgst) if err != nil { - context.GetLogger(ctx).Errorf("Error serving blob from local storage: %s", err.Error()) + dcontext.GetLogger(ctx).Errorf("Error serving blob from local storage: %s", err.Error()) return err } @@ -140,12 +141,12 @@ func (pbs *proxyBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, go func(dgst digest.Digest) { if err := pbs.storeLocal(ctx, dgst); err != nil { - context.GetLogger(ctx).Errorf("Error committing to storage: %s", err.Error()) + dcontext.GetLogger(ctx).Errorf("Error committing to storage: %s", err.Error()) } blobRef, err := reference.WithDigest(pbs.repositoryName, dgst) if err != nil { - context.GetLogger(ctx).Errorf("Error creating reference: %s", err) + dcontext.GetLogger(ctx).Errorf("Error creating reference: %s", err) return } diff --git a/registry/proxy/proxyblobstore_test.go b/registry/proxy/proxyblobstore_test.go index 0bba01a6..9bee48d8 100644 --- a/registry/proxy/proxyblobstore_test.go +++ b/registry/proxy/proxyblobstore_test.go @@ -1,6 +1,7 @@ package proxy import ( + "context" "io/ioutil" "math/rand" "net/http" @@ -10,7 +11,6 @@ import ( "time" "github.com/docker/distribution" - "github.com/docker/distribution/context" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/proxy/scheduler" "github.com/docker/distribution/registry/storage" diff --git a/registry/proxy/proxymanifeststore.go b/registry/proxy/proxymanifeststore.go index e0a9f7d3..5c7264d9 100644 --- a/registry/proxy/proxymanifeststore.go +++ b/registry/proxy/proxymanifeststore.go @@ -1,10 +1,11 @@ package proxy 
import ( + "context" "time" "github.com/docker/distribution" - "github.com/docker/distribution/context" + dcontext "github.com/docker/distribution/context" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/proxy/scheduler" "github.com/opencontainers/go-digest" @@ -72,7 +73,7 @@ func (pms proxyManifestStore) Get(ctx context.Context, dgst digest.Digest, optio // Schedule the manifest blob for removal repoBlob, err := reference.WithDigest(pms.repositoryName, dgst) if err != nil { - context.GetLogger(ctx).Errorf("Error creating reference: %s", err) + dcontext.GetLogger(ctx).Errorf("Error creating reference: %s", err) return nil, err } diff --git a/registry/proxy/proxymanifeststore_test.go b/registry/proxy/proxymanifeststore_test.go index ca0845b9..5f3bd34e 100644 --- a/registry/proxy/proxymanifeststore_test.go +++ b/registry/proxy/proxymanifeststore_test.go @@ -1,12 +1,12 @@ package proxy import ( + "context" "io" "sync" "testing" "github.com/docker/distribution" - "github.com/docker/distribution/context" "github.com/docker/distribution/manifest" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/reference" diff --git a/registry/proxy/proxyregistry.go b/registry/proxy/proxyregistry.go index d64dcbb9..aec220e8 100644 --- a/registry/proxy/proxyregistry.go +++ b/registry/proxy/proxyregistry.go @@ -1,6 +1,7 @@ package proxy import ( + "context" "fmt" "net/http" "net/url" @@ -8,7 +9,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/configuration" - "github.com/docker/distribution/context" + dcontext "github.com/docker/distribution/context" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/client" "github.com/docker/distribution/registry/client/auth" @@ -218,7 +219,7 @@ func (r *remoteAuthChallenger) tryEstablishChallenges(ctx context.Context) error return err } - context.GetLogger(ctx).Infof("Challenge established with upstream : %s %s", remoteURL, r.cm) + dcontext.GetLogger(ctx).Infof("Challenge established with upstream : %s %s", remoteURL, r.cm) return nil } diff --git a/registry/proxy/proxytagservice.go b/registry/proxy/proxytagservice.go index a8273030..6a925639 100644 --- a/registry/proxy/proxytagservice.go +++ b/registry/proxy/proxytagservice.go @@ -1,8 +1,9 @@ package proxy import ( + "context" + "github.com/docker/distribution" - "github.com/docker/distribution/context" ) // proxyTagService supports local and remote lookup of tags. 
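The hunks above and below all apply one mechanical rule: the standard
library's "context" package now supplies the Context type, and the forked
"github.com/docker/distribution/context" package survives only under the
dcontext alias, and only in files that still call its logger helpers. A
minimal sketch of the shape a converted file converges on (the package and
function names here are illustrative, not taken from the patch):

    package example

    import (
    	"context"

    	dcontext "github.com/docker/distribution/context"
    )

    // handle demonstrates the converted call sites: ctx is a plain
    // stdlib context.Context, while the request-scoped logger is still
    // obtained through the distribution helpers via the dcontext alias.
    func handle(ctx context.Context) {
    	dcontext.GetLogger(ctx).Debug("entering handler")
    	if err := doWork(ctx); err != nil {
    		dcontext.GetLogger(ctx).Errorf("work failed: %v", err)
    	}
    }

    func doWork(ctx context.Context) error { return nil }
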
diff --git a/registry/proxy/proxytagservice_test.go b/registry/proxy/proxytagservice_test.go index ce0fe78b..1314121e 100644 --- a/registry/proxy/proxytagservice_test.go +++ b/registry/proxy/proxytagservice_test.go @@ -1,13 +1,13 @@ package proxy import ( + "context" "reflect" "sort" "sync" "testing" "github.com/docker/distribution" - "github.com/docker/distribution/context" ) type mockTagStore struct { diff --git a/registry/proxy/scheduler/scheduler.go b/registry/proxy/scheduler/scheduler.go index bde94657..f8340184 100644 --- a/registry/proxy/scheduler/scheduler.go +++ b/registry/proxy/scheduler/scheduler.go @@ -1,12 +1,13 @@ package scheduler import ( + "context" "encoding/json" "fmt" "sync" "time" - "github.com/docker/distribution/context" + dcontext "github.com/docker/distribution/context" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/driver" ) @@ -120,7 +121,7 @@ func (ttles *TTLExpirationScheduler) Start() error { return fmt.Errorf("Scheduler already started") } - context.GetLogger(ttles.ctx).Infof("Starting cached object TTL expiration scheduler...") + dcontext.GetLogger(ttles.ctx).Infof("Starting cached object TTL expiration scheduler...") ttles.stopped = false // Start timer for each deserialized entry @@ -142,7 +143,7 @@ func (ttles *TTLExpirationScheduler) Start() error { err := ttles.writeState() if err != nil { - context.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err) + dcontext.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err) } else { ttles.indexDirty = false } @@ -163,7 +164,7 @@ func (ttles *TTLExpirationScheduler) add(r reference.Reference, ttl time.Duratio Expiry: time.Now().Add(ttl), EntryType: eType, } - context.GetLogger(ttles.ctx).Infof("Adding new scheduler entry for %s with ttl=%s", entry.Key, entry.Expiry.Sub(time.Now())) + dcontext.GetLogger(ttles.ctx).Infof("Adding new scheduler entry for %s with ttl=%s", entry.Key, entry.Expiry.Sub(time.Now())) if oldEntry, present := ttles.entries[entry.Key]; present && oldEntry.timer != nil { oldEntry.timer.Stop() } @@ -193,10 +194,10 @@ func (ttles *TTLExpirationScheduler) startTimer(entry *schedulerEntry, ttl time. 
ref, err := reference.Parse(entry.Key) if err == nil { if err := f(ref); err != nil { - context.GetLogger(ttles.ctx).Errorf("Scheduler error returned from OnExpire(%s): %s", entry.Key, err) + dcontext.GetLogger(ttles.ctx).Errorf("Scheduler error returned from OnExpire(%s): %s", entry.Key, err) } } else { - context.GetLogger(ttles.ctx).Errorf("Error unpacking reference: %s", err) + dcontext.GetLogger(ttles.ctx).Errorf("Error unpacking reference: %s", err) } delete(ttles.entries, entry.Key) @@ -210,7 +211,7 @@ func (ttles *TTLExpirationScheduler) Stop() { defer ttles.Unlock() if err := ttles.writeState(); err != nil { - context.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err) + dcontext.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err) } for _, entry := range ttles.entries { diff --git a/registry/registry.go b/registry/registry.go index fbb4dd87..2e887784 100644 --- a/registry/registry.go +++ b/registry/registry.go @@ -1,6 +1,7 @@ package registry import ( + "context" "crypto/tls" "crypto/x509" "fmt" @@ -14,7 +15,7 @@ import ( logstash "github.com/bshuster-repo/logrus-logstash-hook" "github.com/bugsnag/bugsnag-go" "github.com/docker/distribution/configuration" - "github.com/docker/distribution/context" + dcontext "github.com/docker/distribution/context" "github.com/docker/distribution/health" "github.com/docker/distribution/registry/handlers" "github.com/docker/distribution/registry/listener" @@ -34,7 +35,7 @@ var ServeCmd = &cobra.Command{ Run: func(cmd *cobra.Command, args []string) { // setup context - ctx := context.WithVersion(context.Background(), version.Version) + ctx := dcontext.WithVersion(dcontext.Background(), version.Version) config, err := resolveConfiguration(args) if err != nil { @@ -81,7 +82,7 @@ func NewRegistry(ctx context.Context, config *configuration.Configuration) (*Reg // inject a logger into the uuid library. warns us if there is a problem // with uuid generation under low entropy. - uuid.Loggerf = context.GetLogger(ctx).Warnf + uuid.Loggerf = dcontext.GetLogger(ctx).Warnf app := handlers.NewApp(ctx, config) // TODO(aaronl): The global scope of the health checks means NewRegistry @@ -170,7 +171,7 @@ func (registry *Registry) ListenAndServe() error { } for _, subj := range pool.Subjects() { - context.GetLogger(registry.app).Debugf("CA Subject: %s", string(subj)) + dcontext.GetLogger(registry.app).Debugf("CA Subject: %s", string(subj)) } tlsConf.ClientAuth = tls.RequireAndVerifyClientCert @@ -178,9 +179,9 @@ func (registry *Registry) ListenAndServe() error { } ln = tls.NewListener(ln, tlsConf) - context.GetLogger(registry.app).Infof("listening on %v, tls", ln.Addr()) + dcontext.GetLogger(registry.app).Infof("listening on %v, tls", ln.Addr()) } else { - context.GetLogger(registry.app).Infof("listening on %v", ln.Addr()) + dcontext.GetLogger(registry.app).Infof("listening on %v", ln.Addr()) } return registry.server.Serve(ln) @@ -228,7 +229,7 @@ func configureLogging(ctx context.Context, config *configuration.Configuration) if config.Log.Level == "" && config.Log.Formatter == "" { // If no config for logging is set, fallback to deprecated "Loglevel". 
log.SetLevel(logLevel(config.Loglevel)) - ctx = context.WithLogger(ctx, context.GetLogger(ctx)) + ctx = dcontext.WithLogger(ctx, dcontext.GetLogger(ctx)) return ctx, nil } @@ -270,8 +271,8 @@ func configureLogging(ctx context.Context, config *configuration.Configuration) fields = append(fields, k) } - ctx = context.WithValues(ctx, config.Log.Fields) - ctx = context.WithLogger(ctx, context.GetLogger(ctx, fields...)) + ctx = dcontext.WithValues(ctx, config.Log.Fields) + ctx = dcontext.WithLogger(ctx, dcontext.GetLogger(ctx, fields...)) } return ctx, nil diff --git a/registry/root.go b/registry/root.go index 5d3005c2..538139f1 100644 --- a/registry/root.go +++ b/registry/root.go @@ -4,7 +4,7 @@ import ( "fmt" "os" - "github.com/docker/distribution/context" + dcontext "github.com/docker/distribution/context" "github.com/docker/distribution/registry/storage" "github.com/docker/distribution/registry/storage/driver/factory" "github.com/docker/distribution/version" @@ -56,7 +56,7 @@ var GCCmd = &cobra.Command{ os.Exit(1) } - ctx := context.Background() + ctx := dcontext.Background() ctx, err = configureLogging(ctx, config) if err != nil { fmt.Fprintf(os.Stderr, "unable to configure logging with config: %s", err) diff --git a/registry/storage/blob_test.go b/registry/storage/blob_test.go index a263dd6c..fc027f4e 100644 --- a/registry/storage/blob_test.go +++ b/registry/storage/blob_test.go @@ -2,6 +2,7 @@ package storage import ( "bytes" + "context" "crypto/sha256" "fmt" "io" @@ -12,7 +13,6 @@ import ( "testing" "github.com/docker/distribution" - "github.com/docker/distribution/context" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/cache/memory" "github.com/docker/distribution/registry/storage/driver/testdriver" diff --git a/registry/storage/blobserver.go b/registry/storage/blobserver.go index 739bf3cb..57d4f907 100644 --- a/registry/storage/blobserver.go +++ b/registry/storage/blobserver.go @@ -1,12 +1,12 @@ package storage import ( + "context" "fmt" "net/http" "time" "github.com/docker/distribution" - "github.com/docker/distribution/context" "github.com/docker/distribution/registry/storage/driver" "github.com/opencontainers/go-digest" ) diff --git a/registry/storage/blobstore.go b/registry/storage/blobstore.go index 4a16488b..bfa41937 100644 --- a/registry/storage/blobstore.go +++ b/registry/storage/blobstore.go @@ -1,10 +1,11 @@ package storage import ( + "context" "path" "github.com/docker/distribution" - "github.com/docker/distribution/context" + dcontext "github.com/docker/distribution/context" "github.com/docker/distribution/registry/storage/driver" "github.com/opencontainers/go-digest" ) @@ -64,7 +65,7 @@ func (bs *blobStore) Put(ctx context.Context, mediaType string, p []byte) (distr // content already present return desc, nil } else if err != distribution.ErrBlobUnknown { - context.GetLogger(ctx).Errorf("blobStore: error stating content (%v): %v", dgst, err) + dcontext.GetLogger(ctx).Errorf("blobStore: error stating content (%v): %v", dgst, err) // real error, return it return distribution.Descriptor{}, err } @@ -195,7 +196,7 @@ func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distributi // NOTE(stevvooe): This represents a corruption situation. Somehow, we // calculated a blob path and then detected a directory. We log the // error and then error on the side of not knowing about the blob. 
- context.GetLogger(ctx).Warnf("blob path should not be a directory: %q", path) + dcontext.GetLogger(ctx).Warnf("blob path should not be a directory: %q", path) return distribution.Descriptor{}, distribution.ErrBlobUnknown } diff --git a/registry/storage/blobwriter.go b/registry/storage/blobwriter.go index 392b166c..d56f2874 100644 --- a/registry/storage/blobwriter.go +++ b/registry/storage/blobwriter.go @@ -1,6 +1,7 @@ package storage import ( + "context" "errors" "fmt" "io" @@ -8,7 +9,7 @@ import ( "time" "github.com/docker/distribution" - "github.com/docker/distribution/context" + dcontext "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" @@ -56,7 +57,7 @@ func (bw *blobWriter) StartedAt() time.Time { // Commit marks the upload as completed, returning a valid descriptor. The // final size and digest are checked against the first descriptor provided. func (bw *blobWriter) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { - context.GetLogger(ctx).Debug("(*blobWriter).Commit") + dcontext.GetLogger(ctx).Debug("(*blobWriter).Commit") if err := bw.fileWriter.Commit(); err != nil { return distribution.Descriptor{}, err @@ -94,13 +95,13 @@ func (bw *blobWriter) Commit(ctx context.Context, desc distribution.Descriptor) // Cancel the blob upload process, releasing any resources associated with // the writer and canceling the operation. func (bw *blobWriter) Cancel(ctx context.Context) error { - context.GetLogger(ctx).Debug("(*blobWriter).Cancel") + dcontext.GetLogger(ctx).Debug("(*blobWriter).Cancel") if err := bw.fileWriter.Cancel(); err != nil { return err } if err := bw.Close(); err != nil { - context.GetLogger(ctx).Errorf("error closing blobwriter: %s", err) + dcontext.GetLogger(ctx).Errorf("error closing blobwriter: %s", err) } if err := bw.removeResources(ctx); err != nil { @@ -261,7 +262,7 @@ func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descri } if !verified { - context.GetLoggerWithFields(ctx, + dcontext.GetLoggerWithFields(ctx, map[interface{}]interface{}{ "canonical": canonical, "provided": desc.Digest, @@ -365,7 +366,7 @@ func (bw *blobWriter) removeResources(ctx context.Context) error { // This should be uncommon enough such that returning an error // should be okay. At this point, the upload should be mostly // complete, but perhaps the backend became unaccessible. 
- context.GetLogger(ctx).Errorf("unable to delete layer upload resources %q: %v", dirPath, err) + dcontext.GetLogger(ctx).Errorf("unable to delete layer upload resources %q: %v", dirPath, err) return err } } @@ -383,7 +384,7 @@ func (bw *blobWriter) Reader() (io.ReadCloser, error) { } switch err.(type) { case storagedriver.PathNotFoundError: - context.GetLogger(bw.ctx).Debugf("Nothing found on try %d, sleeping...", try) + dcontext.GetLogger(bw.ctx).Debugf("Nothing found on try %d, sleeping...", try) time.Sleep(1 * time.Second) try++ default: diff --git a/registry/storage/blobwriter_resumable.go b/registry/storage/blobwriter_resumable.go index cc1a833d..8c909169 100644 --- a/registry/storage/blobwriter_resumable.go +++ b/registry/storage/blobwriter_resumable.go @@ -3,11 +3,11 @@ package storage import ( + "context" "fmt" "path" "strconv" - "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/sirupsen/logrus" "github.com/stevvooe/resumable" diff --git a/registry/storage/cache/cachecheck/suite.go b/registry/storage/cache/cachecheck/suite.go index 0bbd5295..b23765e8 100644 --- a/registry/storage/cache/cachecheck/suite.go +++ b/registry/storage/cache/cachecheck/suite.go @@ -1,11 +1,11 @@ package cachecheck import ( + "context" "reflect" "testing" "github.com/docker/distribution" - "github.com/docker/distribution/context" "github.com/docker/distribution/registry/storage/cache" "github.com/opencontainers/go-digest" ) diff --git a/registry/storage/cache/cachedblobdescriptorstore.go b/registry/storage/cache/cachedblobdescriptorstore.go index f647616b..668eebda 100644 --- a/registry/storage/cache/cachedblobdescriptorstore.go +++ b/registry/storage/cache/cachedblobdescriptorstore.go @@ -1,10 +1,11 @@ package cache import ( - "github.com/docker/distribution/context" - "github.com/opencontainers/go-digest" + "context" "github.com/docker/distribution" + dcontext "github.com/docker/distribution/context" + "github.com/opencontainers/go-digest" ) // Metrics is used to hold metric counters @@ -53,7 +54,7 @@ func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (di desc, err := cbds.cache.Stat(ctx, dgst) if err != nil { if err != distribution.ErrBlobUnknown { - context.GetLogger(ctx).Errorf("error retrieving descriptor from cache: %v", err) + dcontext.GetLogger(ctx).Errorf("error retrieving descriptor from cache: %v", err) } goto fallback @@ -73,7 +74,7 @@ fallback: } if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { - context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err) + dcontext.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err) } return desc, err @@ -95,7 +96,7 @@ func (cbds *cachedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) er func (cbds *cachedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { - context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err) + dcontext.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err) } return nil } diff --git a/registry/storage/cache/memory/memory.go b/registry/storage/cache/memory/memory.go index b2fcaf4e..42d94d9b 100644 --- a/registry/storage/cache/memory/memory.go +++ b/registry/storage/cache/memory/memory.go @@ -1,10 +1,10 @@ package memory import ( + "context" "sync" 
"github.com/docker/distribution" - "github.com/docker/distribution/context" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/cache" "github.com/opencontainers/go-digest" diff --git a/registry/storage/cache/redis/redis.go b/registry/storage/cache/redis/redis.go index 98f0f759..36c6ac60 100644 --- a/registry/storage/cache/redis/redis.go +++ b/registry/storage/cache/redis/redis.go @@ -1,10 +1,10 @@ package redis import ( + "context" "fmt" "github.com/docker/distribution" - "github.com/docker/distribution/context" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/cache" "github.com/garyburd/redigo/redis" diff --git a/registry/storage/catalog.go b/registry/storage/catalog.go index 0b59a39a..f3c6fe9e 100644 --- a/registry/storage/catalog.go +++ b/registry/storage/catalog.go @@ -1,12 +1,12 @@ package storage import ( + "context" "errors" "io" "path" "strings" - "github.com/docker/distribution/context" "github.com/docker/distribution/registry/storage/driver" ) diff --git a/registry/storage/catalog_test.go b/registry/storage/catalog_test.go index 90dc12ce..696e024e 100644 --- a/registry/storage/catalog_test.go +++ b/registry/storage/catalog_test.go @@ -1,13 +1,13 @@ package storage import ( + "context" "fmt" "io" "math/rand" "testing" "github.com/docker/distribution" - "github.com/docker/distribution/context" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/cache/memory" "github.com/docker/distribution/registry/storage/driver" diff --git a/registry/storage/driver/azure/azure.go b/registry/storage/driver/azure/azure.go index 45d2b1e5..a37a22bd 100644 --- a/registry/storage/driver/azure/azure.go +++ b/registry/storage/driver/azure/azure.go @@ -5,6 +5,7 @@ package azure import ( "bufio" "bytes" + "context" "fmt" "io" "io/ioutil" @@ -12,7 +13,6 @@ import ( "strings" "time" - "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/base" "github.com/docker/distribution/registry/storage/driver/factory" diff --git a/registry/storage/driver/base/base.go b/registry/storage/driver/base/base.go index e14f7edb..1778c6c5 100644 --- a/registry/storage/driver/base/base.go +++ b/registry/storage/driver/base/base.go @@ -38,9 +38,10 @@ package base import ( + "context" "io" - "github.com/docker/distribution/context" + dcontext "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" ) @@ -79,7 +80,7 @@ func (base *Base) setDriverName(e error) error { // GetContent wraps GetContent of underlying storage driver. func (base *Base) GetContent(ctx context.Context, path string) ([]byte, error) { - ctx, done := context.WithTrace(ctx) + ctx, done := dcontext.WithTrace(ctx) defer done("%s.GetContent(%q)", base.Name(), path) if !storagedriver.PathRegexp.MatchString(path) { @@ -92,7 +93,7 @@ func (base *Base) GetContent(ctx context.Context, path string) ([]byte, error) { // PutContent wraps PutContent of underlying storage driver. 
func (base *Base) PutContent(ctx context.Context, path string, content []byte) error { - ctx, done := context.WithTrace(ctx) + ctx, done := dcontext.WithTrace(ctx) defer done("%s.PutContent(%q)", base.Name(), path) if !storagedriver.PathRegexp.MatchString(path) { @@ -104,7 +105,7 @@ func (base *Base) PutContent(ctx context.Context, path string, content []byte) e // Reader wraps Reader of underlying storage driver. func (base *Base) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { - ctx, done := context.WithTrace(ctx) + ctx, done := dcontext.WithTrace(ctx) defer done("%s.Reader(%q, %d)", base.Name(), path, offset) if offset < 0 { @@ -121,7 +122,7 @@ func (base *Base) Reader(ctx context.Context, path string, offset int64) (io.Rea // Writer wraps Writer of underlying storage driver. func (base *Base) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { - ctx, done := context.WithTrace(ctx) + ctx, done := dcontext.WithTrace(ctx) defer done("%s.Writer(%q, %v)", base.Name(), path, append) if !storagedriver.PathRegexp.MatchString(path) { @@ -134,7 +135,7 @@ func (base *Base) Writer(ctx context.Context, path string, append bool) (storage // Stat wraps Stat of underlying storage driver. func (base *Base) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { - ctx, done := context.WithTrace(ctx) + ctx, done := dcontext.WithTrace(ctx) defer done("%s.Stat(%q)", base.Name(), path) if !storagedriver.PathRegexp.MatchString(path) && path != "/" { @@ -147,7 +148,7 @@ func (base *Base) Stat(ctx context.Context, path string) (storagedriver.FileInfo // List wraps List of underlying storage driver. func (base *Base) List(ctx context.Context, path string) ([]string, error) { - ctx, done := context.WithTrace(ctx) + ctx, done := dcontext.WithTrace(ctx) defer done("%s.List(%q)", base.Name(), path) if !storagedriver.PathRegexp.MatchString(path) && path != "/" { @@ -160,7 +161,7 @@ func (base *Base) List(ctx context.Context, path string) ([]string, error) { // Move wraps Move of underlying storage driver. func (base *Base) Move(ctx context.Context, sourcePath string, destPath string) error { - ctx, done := context.WithTrace(ctx) + ctx, done := dcontext.WithTrace(ctx) defer done("%s.Move(%q, %q", base.Name(), sourcePath, destPath) if !storagedriver.PathRegexp.MatchString(sourcePath) { @@ -174,7 +175,7 @@ func (base *Base) Move(ctx context.Context, sourcePath string, destPath string) // Delete wraps Delete of underlying storage driver. func (base *Base) Delete(ctx context.Context, path string) error { - ctx, done := context.WithTrace(ctx) + ctx, done := dcontext.WithTrace(ctx) defer done("%s.Delete(%q)", base.Name(), path) if !storagedriver.PathRegexp.MatchString(path) { @@ -186,7 +187,7 @@ func (base *Base) Delete(ctx context.Context, path string) error { // URLFor wraps URLFor of underlying storage driver. 
func (base *Base) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { - ctx, done := context.WithTrace(ctx) + ctx, done := dcontext.WithTrace(ctx) defer done("%s.URLFor(%q)", base.Name(), path) if !storagedriver.PathRegexp.MatchString(path) { diff --git a/registry/storage/driver/base/regulator.go b/registry/storage/driver/base/regulator.go index 1e929f83..97a30ae4 100644 --- a/registry/storage/driver/base/regulator.go +++ b/registry/storage/driver/base/regulator.go @@ -1,10 +1,10 @@ package base import ( + "context" "io" "sync" - "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" ) diff --git a/registry/storage/driver/filesystem/driver.go b/registry/storage/driver/filesystem/driver.go index 649e2bc2..71c75fba 100644 --- a/registry/storage/driver/filesystem/driver.go +++ b/registry/storage/driver/filesystem/driver.go @@ -3,6 +3,7 @@ package filesystem import ( "bufio" "bytes" + "context" "fmt" "io" "io/ioutil" @@ -12,7 +13,6 @@ import ( "strconv" "time" - "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/base" "github.com/docker/distribution/registry/storage/driver/factory" diff --git a/registry/storage/driver/gcs/gcs.go b/registry/storage/driver/gcs/gcs.go index 8a0be4e9..dadae79c 100644 --- a/registry/storage/driver/gcs/gcs.go +++ b/registry/storage/driver/gcs/gcs.go @@ -16,6 +16,7 @@ package gcs import ( "bytes" + "context" "fmt" "io" "io/ioutil" @@ -29,20 +30,16 @@ import ( "strings" "time" - "golang.org/x/net/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" + "github.com/sirupsen/logrus" "golang.org/x/oauth2" "golang.org/x/oauth2/google" "golang.org/x/oauth2/jwt" "google.golang.org/api/googleapi" "google.golang.org/cloud" "google.golang.org/cloud/storage" - - "github.com/sirupsen/logrus" - - ctx "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/base" - "github.com/docker/distribution/registry/storage/driver/factory" ) const ( @@ -194,7 +191,7 @@ func (d *driver) Name() string { // GetContent retrieves the content stored at "path" as a []byte. // This should primarily be used for small objects. -func (d *driver) GetContent(context ctx.Context, path string) ([]byte, error) { +func (d *driver) GetContent(context context.Context, path string) ([]byte, error) { gcsContext := d.context(context) name := d.pathToKey(path) var rc io.ReadCloser @@ -220,7 +217,7 @@ func (d *driver) GetContent(context ctx.Context, path string) ([]byte, error) { // PutContent stores the []byte content at a location designated by "path". // This should primarily be used for small objects. -func (d *driver) PutContent(context ctx.Context, path string, contents []byte) error { +func (d *driver) PutContent(context context.Context, path string, contents []byte) error { return retry(func() error { wc := storage.NewWriter(d.context(context), d.bucket, d.pathToKey(path)) wc.ContentType = "application/octet-stream" @@ -231,7 +228,7 @@ func (d *driver) PutContent(context ctx.Context, path string, contents []byte) e // Reader retrieves an io.ReadCloser for the content stored at "path" // with a given byte offset. 
// May be used to resume reading a stream by providing a nonzero offset. -func (d *driver) Reader(context ctx.Context, path string, offset int64) (io.ReadCloser, error) { +func (d *driver) Reader(context context.Context, path string, offset int64) (io.ReadCloser, error) { res, err := getObject(d.client, d.bucket, d.pathToKey(path), offset) if err != nil { if res != nil { @@ -290,7 +287,7 @@ func getObject(client *http.Client, bucket string, name string, offset int64) (* // Writer returns a FileWriter which will store the content written to it // at the location designated by "path" after the call to Commit. -func (d *driver) Writer(context ctx.Context, path string, append bool) (storagedriver.FileWriter, error) { +func (d *driver) Writer(context context.Context, path string, append bool) (storagedriver.FileWriter, error) { writer := &writer{ client: d.client, bucket: d.bucket, @@ -542,7 +539,7 @@ func retry(req request) error { // Stat retrieves the FileInfo for the given path, including the current // size in bytes and the creation time. -func (d *driver) Stat(context ctx.Context, path string) (storagedriver.FileInfo, error) { +func (d *driver) Stat(context context.Context, path string) (storagedriver.FileInfo, error) { var fi storagedriver.FileInfoFields //try to get as file gcsContext := d.context(context) @@ -588,7 +585,7 @@ func (d *driver) Stat(context ctx.Context, path string) (storagedriver.FileInfo, // List returns a list of the objects that are direct descendants of the //given path. -func (d *driver) List(context ctx.Context, path string) ([]string, error) { +func (d *driver) List(context context.Context, path string) ([]string, error) { var query *storage.Query query = &storage.Query{} query.Delimiter = "/" @@ -626,7 +623,7 @@ func (d *driver) List(context ctx.Context, path string) ([]string, error) { // Move moves an object stored at sourcePath to destPath, removing the // original object. -func (d *driver) Move(context ctx.Context, sourcePath string, destPath string) error { +func (d *driver) Move(context context.Context, sourcePath string, destPath string) error { gcsContext := d.context(context) _, err := storageCopyObject(gcsContext, d.bucket, d.pathToKey(sourcePath), d.bucket, d.pathToKey(destPath), nil) if err != nil { @@ -674,7 +671,7 @@ func (d *driver) listAll(context context.Context, prefix string) ([]string, erro } // Delete recursively deletes all objects stored at "path" and its subpaths. -func (d *driver) Delete(context ctx.Context, path string) error { +func (d *driver) Delete(context context.Context, path string) error { prefix := d.pathToDirKey(path) gcsContext := d.context(context) keys, err := d.listAll(gcsContext, prefix) @@ -749,7 +746,7 @@ func storageCopyObject(context context.Context, srcBucket, srcName string, destB // URLFor returns a URL which may be used to retrieve the content stored at // the given path, possibly using the given options. 
// Returns ErrUnsupportedMethod if this driver has no privateKey -func (d *driver) URLFor(context ctx.Context, path string, options map[string]interface{}) (string, error) { +func (d *driver) URLFor(context context.Context, path string, options map[string]interface{}) (string, error) { if d.privateKey == nil { return "", storagedriver.ErrUnsupportedMethod{} } @@ -856,7 +853,7 @@ func putChunk(client *http.Client, sessionURI string, chunk []byte, from int64, return bytesPut, err } -func (d *driver) context(context ctx.Context) context.Context { +func (d *driver) context(context context.Context) context.Context { return cloud.WithContext(context, dummyProjectID, d.client) } diff --git a/registry/storage/driver/gcs/gcs_test.go b/registry/storage/driver/gcs/gcs_test.go index f2808d5f..e58216be 100644 --- a/registry/storage/driver/gcs/gcs_test.go +++ b/registry/storage/driver/gcs/gcs_test.go @@ -3,12 +3,12 @@ package gcs import ( + "fmt" "io/ioutil" "os" "testing" - "fmt" - ctx "github.com/docker/distribution/context" + dcontext "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/testsuites" "golang.org/x/oauth2" @@ -49,7 +49,7 @@ func init() { var email string var privateKey []byte - ts, err = google.DefaultTokenSource(ctx.Background(), storage.ScopeFullControl) + ts, err = google.DefaultTokenSource(dcontext.Background(), storage.ScopeFullControl) if err != nil { // Assume that the file contents are within the environment variable since it exists // but does not contain a valid file path @@ -65,7 +65,7 @@ func init() { if email == "" { panic("Error reading JWT config : missing client_email property") } - ts = jwtConfig.TokenSource(ctx.Background()) + ts = jwtConfig.TokenSource(dcontext.Background()) } gcsDriverConstructor = func(rootDirectory string) (storagedriver.StorageDriver, error) { @@ -74,7 +74,7 @@ func init() { rootDirectory: root, email: email, privateKey: privateKey, - client: oauth2.NewClient(ctx.Background(), ts), + client: oauth2.NewClient(dcontext.Background(), ts), chunkSize: defaultChunkSize, } @@ -104,7 +104,7 @@ func TestCommitEmpty(t *testing.T) { } filename := "/test" - ctx := ctx.Background() + ctx := dcontext.Background() writer, err := driver.Writer(ctx, filename, false) defer driver.Delete(ctx, filename) @@ -150,7 +150,7 @@ func TestCommit(t *testing.T) { } filename := "/test" - ctx := ctx.Background() + ctx := dcontext.Background() contents := make([]byte, defaultChunkSize) writer, err := driver.Writer(ctx, filename, false) @@ -247,7 +247,7 @@ func TestEmptyRootList(t *testing.T) { filename := "/test" contents := []byte("contents") - ctx := ctx.Background() + ctx := dcontext.Background() err = rootedDriver.PutContent(ctx, filename, contents) if err != nil { t.Fatalf("unexpected error creating content: %v", err) @@ -290,7 +290,7 @@ func TestMoveDirectory(t *testing.T) { t.Fatalf("unexpected error creating rooted driver: %v", err) } - ctx := ctx.Background() + ctx := dcontext.Background() contents := []byte("contents") // Create a regular file. 
err = driver.PutContent(ctx, "/parent/dir/foo", contents) diff --git a/registry/storage/driver/inmemory/driver.go b/registry/storage/driver/inmemory/driver.go index eb2fd1cf..14bc3940 100644 --- a/registry/storage/driver/inmemory/driver.go +++ b/registry/storage/driver/inmemory/driver.go @@ -1,13 +1,13 @@ package inmemory import ( + "context" "fmt" "io" "io/ioutil" "sync" "time" - "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/base" "github.com/docker/distribution/registry/storage/driver/factory" diff --git a/registry/storage/driver/middleware/cloudfront/middleware.go b/registry/storage/driver/middleware/cloudfront/middleware.go index b0618d1a..61e787a4 100644 --- a/registry/storage/driver/middleware/cloudfront/middleware.go +++ b/registry/storage/driver/middleware/cloudfront/middleware.go @@ -4,6 +4,7 @@ package middleware import ( + "context" "crypto/x509" "encoding/pem" "fmt" @@ -13,7 +14,7 @@ import ( "time" "github.com/aws/aws-sdk-go/service/cloudfront/sign" - "github.com/docker/distribution/context" + dcontext "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" ) @@ -119,7 +120,7 @@ func (lh *cloudFrontStorageMiddleware) URLFor(ctx context.Context, path string, // TODO(endophage): currently only supports S3 keyer, ok := lh.StorageDriver.(S3BucketKeyer) if !ok { - context.GetLogger(ctx).Warn("the CloudFront middleware does not support this backend storage driver") + dcontext.GetLogger(ctx).Warn("the CloudFront middleware does not support this backend storage driver") return lh.StorageDriver.URLFor(ctx, path, options) } diff --git a/registry/storage/driver/middleware/redirect/middleware.go b/registry/storage/driver/middleware/redirect/middleware.go index 20cd7daa..8f63674c 100644 --- a/registry/storage/driver/middleware/redirect/middleware.go +++ b/registry/storage/driver/middleware/redirect/middleware.go @@ -1,10 +1,10 @@ package middleware import ( + "context" "fmt" "net/url" - "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" ) diff --git a/registry/storage/driver/oss/oss.go b/registry/storage/driver/oss/oss.go index 6c0c35a5..f79e3537 100644 --- a/registry/storage/driver/oss/oss.go +++ b/registry/storage/driver/oss/oss.go @@ -13,6 +13,7 @@ package oss import ( "bytes" + "context" "fmt" "io" "io/ioutil" @@ -22,8 +23,6 @@ import ( "strings" "time" - "github.com/docker/distribution/context" - "github.com/denverdino/aliyungo/oss" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/base" diff --git a/registry/storage/driver/oss/oss_test.go b/registry/storage/driver/oss/oss_test.go index fbae5d9c..438d9a48 100644 --- a/registry/storage/driver/oss/oss_test.go +++ b/registry/storage/driver/oss/oss_test.go @@ -4,16 +4,14 @@ package oss import ( "io/ioutil" + "os" + "strconv" + "testing" alioss "github.com/denverdino/aliyungo/oss" "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/testsuites" - //"log" - "os" - "strconv" - "testing" - "gopkg.in/check.v1" ) diff --git 
a/registry/storage/driver/s3-aws/s3.go b/registry/storage/driver/s3-aws/s3.go index 19407d80..33312afc 100644 --- a/registry/storage/driver/s3-aws/s3.go +++ b/registry/storage/driver/s3-aws/s3.go @@ -13,6 +13,7 @@ package s3 import ( "bytes" + "context" "fmt" "io" "io/ioutil" @@ -33,7 +34,6 @@ import ( "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" - "github.com/docker/distribution/context" "github.com/docker/distribution/registry/client/transport" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/base" diff --git a/registry/storage/driver/s3-goamz/s3.go b/registry/storage/driver/s3-goamz/s3.go index b16ca49a..4850f6d7 100644 --- a/registry/storage/driver/s3-goamz/s3.go +++ b/registry/storage/driver/s3-goamz/s3.go @@ -14,6 +14,7 @@ package s3 import ( "bytes" + "context" "fmt" "io" "io/ioutil" @@ -23,14 +24,12 @@ import ( "strings" "time" - "github.com/docker/goamz/aws" - "github.com/docker/goamz/s3" - - "github.com/docker/distribution/context" "github.com/docker/distribution/registry/client/transport" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/base" "github.com/docker/distribution/registry/storage/driver/factory" + "github.com/docker/goamz/aws" + "github.com/docker/goamz/s3" ) const driverName = "s3goamz" diff --git a/registry/storage/driver/storagedriver.go b/registry/storage/driver/storagedriver.go index 548a17d8..4b570dd6 100644 --- a/registry/storage/driver/storagedriver.go +++ b/registry/storage/driver/storagedriver.go @@ -1,13 +1,12 @@ package driver import ( + "context" "fmt" "io" "regexp" "strconv" "strings" - - "github.com/docker/distribution/context" ) // Version is a string representing the storage driver version, of the form diff --git a/registry/storage/driver/swift/swift.go b/registry/storage/driver/swift/swift.go index 4b7aa4e9..11a33264 100644 --- a/registry/storage/driver/swift/swift.go +++ b/registry/storage/driver/swift/swift.go @@ -18,6 +18,7 @@ package swift import ( "bufio" "bytes" + "context" "crypto/rand" "crypto/sha1" "crypto/tls" @@ -34,7 +35,6 @@ import ( "github.com/mitchellh/mapstructure" "github.com/ncw/swift" - "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/base" "github.com/docker/distribution/registry/storage/driver/factory" diff --git a/registry/storage/driver/testdriver/testdriver.go b/registry/storage/driver/testdriver/testdriver.go index 988e5d33..91254627 100644 --- a/registry/storage/driver/testdriver/testdriver.go +++ b/registry/storage/driver/testdriver/testdriver.go @@ -1,7 +1,8 @@ package testdriver import ( - "github.com/docker/distribution/context" + "context" + storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/factory" "github.com/docker/distribution/registry/storage/driver/inmemory" diff --git a/registry/storage/driver/testsuites/testsuites.go b/registry/storage/driver/testsuites/testsuites.go index d8afe0c8..4f2f1ab0 100644 --- a/registry/storage/driver/testsuites/testsuites.go +++ b/registry/storage/driver/testsuites/testsuites.go @@ -2,6 +2,7 @@ package testsuites import ( "bytes" + "context" "crypto/sha1" "io" "io/ioutil" @@ -15,10 +16,8 @@ import ( "testing" "time" - "gopkg.in/check.v1" - - "github.com/docker/distribution/context" storagedriver 
"github.com/docker/distribution/registry/storage/driver" + "gopkg.in/check.v1" ) // Test hooks up gocheck into the "go test" runner. diff --git a/registry/storage/filereader.go b/registry/storage/filereader.go index 3b06c817..2050e43d 100644 --- a/registry/storage/filereader.go +++ b/registry/storage/filereader.go @@ -3,12 +3,12 @@ package storage import ( "bufio" "bytes" + "context" "fmt" "io" "io/ioutil" "os" - "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" ) diff --git a/registry/storage/garbagecollect.go b/registry/storage/garbagecollect.go index 39289893..ada48654 100644 --- a/registry/storage/garbagecollect.go +++ b/registry/storage/garbagecollect.go @@ -1,10 +1,10 @@ package storage import ( + "context" "fmt" "github.com/docker/distribution" - "github.com/docker/distribution/context" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/driver" "github.com/opencontainers/go-digest" diff --git a/registry/storage/io.go b/registry/storage/io.go index c1be3b77..f79e7a6f 100644 --- a/registry/storage/io.go +++ b/registry/storage/io.go @@ -1,11 +1,11 @@ package storage import ( + "context" "errors" "io" "io/ioutil" - "github.com/docker/distribution/context" "github.com/docker/distribution/registry/storage/driver" ) diff --git a/registry/storage/linkedblobstore.go b/registry/storage/linkedblobstore.go index a1929eed..329163ba 100644 --- a/registry/storage/linkedblobstore.go +++ b/registry/storage/linkedblobstore.go @@ -1,13 +1,14 @@ package storage import ( + "context" "fmt" "net/http" "path" "time" "github.com/docker/distribution" - "github.com/docker/distribution/context" + dcontext "github.com/docker/distribution/context" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/uuid" @@ -86,7 +87,7 @@ func (lbs *linkedBlobStore) Put(ctx context.Context, mediaType string, p []byte) // Place the data in the blob store first. desc, err := lbs.blobStore.Put(ctx, mediaType, p) if err != nil { - context.GetLogger(ctx).Errorf("error putting into main store: %v", err) + dcontext.GetLogger(ctx).Errorf("error putting into main store: %v", err) return distribution.Descriptor{}, err } @@ -125,7 +126,7 @@ func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption { // Writer begins a blob write session, returning a handle. func (lbs *linkedBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { - context.GetLogger(ctx).Debug("(*linkedBlobStore).Writer") + dcontext.GetLogger(ctx).Debug("(*linkedBlobStore).Writer") var opts distribution.CreateOptions @@ -174,7 +175,7 @@ func (lbs *linkedBlobStore) Create(ctx context.Context, options ...distribution. } func (lbs *linkedBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { - context.GetLogger(ctx).Debug("(*linkedBlobStore).Resume") + dcontext.GetLogger(ctx).Debug("(*linkedBlobStore).Resume") startedAtPath, err := pathFor(uploadStartedAtPathSpec{ name: lbs.repository.Named().Name(), @@ -411,7 +412,7 @@ func (lbs *linkedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (dis if target != dgst { // Track when we are doing cross-digest domain lookups. ie, sha512 to sha256. 
- context.GetLogger(ctx).Warnf("looking up blob with canonical target: %v -> %v", dgst, target) + dcontext.GetLogger(ctx).Warnf("looking up blob with canonical target: %v -> %v", dgst, target) } // TODO(stevvooe): Look up repository local mediatype and replace that on diff --git a/registry/storage/linkedblobstore_test.go b/registry/storage/linkedblobstore_test.go index a059a778..85376f71 100644 --- a/registry/storage/linkedblobstore_test.go +++ b/registry/storage/linkedblobstore_test.go @@ -1,6 +1,7 @@ package storage import ( + "context" "fmt" "io" "reflect" @@ -8,11 +9,9 @@ import ( "testing" "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/opencontainers/go-digest" - "github.com/docker/distribution/reference" "github.com/docker/distribution/testutil" + "github.com/opencontainers/go-digest" ) func TestLinkedBlobStoreCreateWithMountFrom(t *testing.T) { diff --git a/registry/storage/manifestlisthandler.go b/registry/storage/manifestlisthandler.go index aee73b85..085ccccc 100644 --- a/registry/storage/manifestlisthandler.go +++ b/registry/storage/manifestlisthandler.go @@ -1,11 +1,12 @@ package storage import ( + "context" + "encoding/json" "fmt" - "encoding/json" "github.com/docker/distribution" - "github.com/docker/distribution/context" + dcontext "github.com/docker/distribution/context" "github.com/docker/distribution/manifest/manifestlist" "github.com/opencontainers/go-digest" ) @@ -20,7 +21,7 @@ type manifestListHandler struct { var _ ManifestHandler = &manifestListHandler{} func (ms *manifestListHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { - context.GetLogger(ms.ctx).Debug("(*manifestListHandler).Unmarshal") + dcontext.GetLogger(ms.ctx).Debug("(*manifestListHandler).Unmarshal") var m manifestlist.DeserializedManifestList if err := json.Unmarshal(content, &m); err != nil { @@ -31,7 +32,7 @@ func (ms *manifestListHandler) Unmarshal(ctx context.Context, dgst digest.Digest } func (ms *manifestListHandler) Put(ctx context.Context, manifestList distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { - context.GetLogger(ms.ctx).Debug("(*manifestListHandler).Put") + dcontext.GetLogger(ms.ctx).Debug("(*manifestListHandler).Put") m, ok := manifestList.(*manifestlist.DeserializedManifestList) if !ok { @@ -49,7 +50,7 @@ func (ms *manifestListHandler) Put(ctx context.Context, manifestList distributio revision, err := ms.blobStore.Put(ctx, mt, payload) if err != nil { - context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) + dcontext.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) return "", err } diff --git a/registry/storage/manifeststore.go b/registry/storage/manifeststore.go index 4cca5157..20e34eb0 100644 --- a/registry/storage/manifeststore.go +++ b/registry/storage/manifeststore.go @@ -1,11 +1,12 @@ package storage import ( + "context" + "encoding/json" "fmt" - "encoding/json" "github.com/docker/distribution" - "github.com/docker/distribution/context" + dcontext "github.com/docker/distribution/context" "github.com/docker/distribution/manifest" "github.com/docker/distribution/manifest/manifestlist" "github.com/docker/distribution/manifest/schema1" @@ -53,7 +54,7 @@ type manifestStore struct { var _ distribution.ManifestService = &manifestStore{} func (ms *manifestStore) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { - context.GetLogger(ms.ctx).Debug("(*manifestStore).Exists") + 
dcontext.GetLogger(ms.ctx).Debug("(*manifestStore).Exists") _, err := ms.blobStore.Stat(ms.ctx, dgst) if err != nil { @@ -68,7 +69,7 @@ func (ms *manifestStore) Exists(ctx context.Context, dgst digest.Digest) (bool, } func (ms *manifestStore) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { - context.GetLogger(ms.ctx).Debug("(*manifestStore).Get") + dcontext.GetLogger(ms.ctx).Debug("(*manifestStore).Get") // TODO(stevvooe): Need to check descriptor from above to ensure that the // mediatype is as we expect for the manifest store. @@ -109,7 +110,7 @@ func (ms *manifestStore) Get(ctx context.Context, dgst digest.Digest, options .. } func (ms *manifestStore) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { - context.GetLogger(ms.ctx).Debug("(*manifestStore).Put") + dcontext.GetLogger(ms.ctx).Debug("(*manifestStore).Put") switch manifest.(type) { case *schema1.SignedManifest: @@ -125,7 +126,7 @@ func (ms *manifestStore) Put(ctx context.Context, manifest distribution.Manifest // Delete removes the revision of the specified manifest. func (ms *manifestStore) Delete(ctx context.Context, dgst digest.Digest) error { - context.GetLogger(ms.ctx).Debug("(*manifestStore).Delete") + dcontext.GetLogger(ms.ctx).Debug("(*manifestStore).Delete") return ms.blobStore.Delete(ctx, dgst) } diff --git a/registry/storage/manifeststore_test.go b/registry/storage/manifeststore_test.go index e398058b..2fad7611 100644 --- a/registry/storage/manifeststore_test.go +++ b/registry/storage/manifeststore_test.go @@ -2,12 +2,12 @@ package storage import ( "bytes" + "context" "io" "reflect" "testing" "github.com/docker/distribution" - "github.com/docker/distribution/context" "github.com/docker/distribution/manifest" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/reference" diff --git a/registry/storage/purgeuploads.go b/registry/storage/purgeuploads.go index d6cae4e7..2396e803 100644 --- a/registry/storage/purgeuploads.go +++ b/registry/storage/purgeuploads.go @@ -1,14 +1,14 @@ package storage import ( + "context" "path" "strings" "time" - "github.com/docker/distribution/context" storageDriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/uuid" - log "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus" ) // uploadData stored the location of temporary files created during a layer upload @@ -30,13 +30,13 @@ func newUploadData() uploadData { // created before olderThan. The list of files deleted and errors // encountered are returned func PurgeUploads(ctx context.Context, driver storageDriver.StorageDriver, olderThan time.Time, actuallyDelete bool) ([]string, []error) { - log.Infof("PurgeUploads starting: olderThan=%s, actuallyDelete=%t", olderThan, actuallyDelete) + logrus.Infof("PurgeUploads starting: olderThan=%s, actuallyDelete=%t", olderThan, actuallyDelete) uploadData, errors := getOutstandingUploads(ctx, driver) var deleted []string for _, uploadData := range uploadData { if uploadData.startedAt.Before(olderThan) { var err error - log.Infof("Upload files in %s have older date (%s) than purge date (%s). Removing upload directory.", + logrus.Infof("Upload files in %s have older date (%s) than purge date (%s). 
Removing upload directory.", uploadData.containingDir, uploadData.startedAt, olderThan) if actuallyDelete { err = driver.Delete(ctx, uploadData.containingDir) @@ -49,7 +49,7 @@ func PurgeUploads(ctx context.Context, driver storageDriver.StorageDriver, older } } - log.Infof("Purge uploads finished. Num deleted=%d, num errors=%d", len(deleted), len(errors)) + logrus.Infof("Purge uploads finished. Num deleted=%d, num errors=%d", len(deleted), len(errors)) return deleted, errors } diff --git a/registry/storage/purgeuploads_test.go b/registry/storage/purgeuploads_test.go index 3b70f723..069f212d 100644 --- a/registry/storage/purgeuploads_test.go +++ b/registry/storage/purgeuploads_test.go @@ -1,12 +1,12 @@ package storage import ( + "context" "path" "strings" "testing" "time" - "github.com/docker/distribution/context" "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/inmemory" "github.com/docker/distribution/uuid" diff --git a/registry/storage/registry.go b/registry/storage/registry.go index 20525ffb..46b96853 100644 --- a/registry/storage/registry.go +++ b/registry/storage/registry.go @@ -1,10 +1,10 @@ package storage import ( + "context" "regexp" "github.com/docker/distribution" - "github.com/docker/distribution/context" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/cache" storagedriver "github.com/docker/distribution/registry/storage/driver" diff --git a/registry/storage/schema2manifesthandler.go b/registry/storage/schema2manifesthandler.go index 05c53254..b0ae01ae 100644 --- a/registry/storage/schema2manifesthandler.go +++ b/registry/storage/schema2manifesthandler.go @@ -1,13 +1,14 @@ package storage import ( + "context" "encoding/json" "errors" "fmt" "net/url" "github.com/docker/distribution" - "github.com/docker/distribution/context" + dcontext "github.com/docker/distribution/context" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/manifest/schema2" "github.com/opencontainers/go-digest" @@ -30,7 +31,7 @@ type schema2ManifestHandler struct { var _ ManifestHandler = &schema2ManifestHandler{} func (ms *schema2ManifestHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { - context.GetLogger(ms.ctx).Debug("(*schema2ManifestHandler).Unmarshal") + dcontext.GetLogger(ms.ctx).Debug("(*schema2ManifestHandler).Unmarshal") var m schema2.DeserializedManifest if err := json.Unmarshal(content, &m); err != nil { @@ -41,7 +42,7 @@ func (ms *schema2ManifestHandler) Unmarshal(ctx context.Context, dgst digest.Dig } func (ms *schema2ManifestHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { - context.GetLogger(ms.ctx).Debug("(*schema2ManifestHandler).Put") + dcontext.GetLogger(ms.ctx).Debug("(*schema2ManifestHandler).Put") m, ok := manifest.(*schema2.DeserializedManifest) if !ok { @@ -59,7 +60,7 @@ func (ms *schema2ManifestHandler) Put(ctx context.Context, manifest distribution revision, err := ms.blobStore.Put(ctx, mt, payload) if err != nil { - context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) + dcontext.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) return "", err } diff --git a/registry/storage/signedmanifesthandler.go b/registry/storage/signedmanifesthandler.go index 6ca1c6c8..f94ecbea 100644 --- a/registry/storage/signedmanifesthandler.go +++ b/registry/storage/signedmanifesthandler.go 
@@ -1,11 +1,12 @@ package storage import ( + "context" "encoding/json" "fmt" "github.com/docker/distribution" - "github.com/docker/distribution/context" + dcontext "github.com/docker/distribution/context" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/reference" "github.com/docker/libtrust" @@ -24,7 +25,7 @@ type signedManifestHandler struct { var _ ManifestHandler = &signedManifestHandler{} func (ms *signedManifestHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { - context.GetLogger(ms.ctx).Debug("(*signedManifestHandler).Unmarshal") + dcontext.GetLogger(ms.ctx).Debug("(*signedManifestHandler).Unmarshal") var ( signatures [][]byte @@ -56,7 +57,7 @@ func (ms *signedManifestHandler) Unmarshal(ctx context.Context, dgst digest.Dige } func (ms *signedManifestHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { - context.GetLogger(ms.ctx).Debug("(*signedManifestHandler).Put") + dcontext.GetLogger(ms.ctx).Debug("(*signedManifestHandler).Put") sm, ok := manifest.(*schema1.SignedManifest) if !ok { @@ -72,7 +73,7 @@ func (ms *signedManifestHandler) Put(ctx context.Context, manifest distribution. revision, err := ms.blobStore.Put(ctx, mt, payload) if err != nil { - context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) + dcontext.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) return "", err } diff --git a/registry/storage/tagstore.go b/registry/storage/tagstore.go index d7327886..3fbed6d3 100644 --- a/registry/storage/tagstore.go +++ b/registry/storage/tagstore.go @@ -1,10 +1,10 @@ package storage import ( + "context" "path" "github.com/docker/distribution" - "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/opencontainers/go-digest" ) diff --git a/registry/storage/tagstore_test.go b/registry/storage/tagstore_test.go index 396441ee..5bfa1451 100644 --- a/registry/storage/tagstore_test.go +++ b/registry/storage/tagstore_test.go @@ -1,10 +1,10 @@ package storage import ( + "context" "testing" "github.com/docker/distribution" - "github.com/docker/distribution/context" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/driver/inmemory" ) diff --git a/registry/storage/util.go b/registry/storage/util.go index 773d7ba0..8ab235e2 100644 --- a/registry/storage/util.go +++ b/registry/storage/util.go @@ -1,7 +1,8 @@ package storage import ( - "github.com/docker/distribution/context" + "context" + "github.com/docker/distribution/registry/storage/driver" ) diff --git a/registry/storage/vacuum.go b/registry/storage/vacuum.go index 42c8ef60..d2ebc539 100644 --- a/registry/storage/vacuum.go +++ b/registry/storage/vacuum.go @@ -1,9 +1,10 @@ package storage import ( + "context" "path" - "github.com/docker/distribution/context" + dcontext "github.com/docker/distribution/context" "github.com/docker/distribution/registry/storage/driver" "github.com/opencontainers/go-digest" ) @@ -39,7 +40,7 @@ func (v Vacuum) RemoveBlob(dgst string) error { return err } - context.GetLogger(v.ctx).Infof("Deleting blob: %s", blobPath) + dcontext.GetLogger(v.ctx).Infof("Deleting blob: %s", blobPath) err = v.driver.Delete(v.ctx, blobPath) if err != nil { @@ -57,7 +58,7 @@ func (v Vacuum) RemoveRepository(repoName string) error { return err } repoDir := path.Join(rootForRepository, repoName) - 
context.GetLogger(v.ctx).Infof("Deleting repo: %s", repoDir) + dcontext.GetLogger(v.ctx).Infof("Deleting repo: %s", repoDir) err = v.driver.Delete(v.ctx, repoDir) if err != nil { return err diff --git a/registry/storage/walk.go b/registry/storage/walk.go index d979796e..3322d16a 100644 --- a/registry/storage/walk.go +++ b/registry/storage/walk.go @@ -1,11 +1,11 @@ package storage import ( + "context" "errors" "fmt" "sort" - "github.com/docker/distribution/context" storageDriver "github.com/docker/distribution/registry/storage/driver" ) diff --git a/registry/storage/walk_test.go b/registry/storage/walk_test.go index 3d7a4b1b..ca9caae0 100644 --- a/registry/storage/walk_test.go +++ b/registry/storage/walk_test.go @@ -1,11 +1,11 @@ package storage import ( + "context" "fmt" "sort" "testing" - "github.com/docker/distribution/context" "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/inmemory" ) diff --git a/tags.go b/tags.go index 50305659..f22df2b8 100644 --- a/tags.go +++ b/tags.go @@ -1,7 +1,7 @@ package distribution import ( - "github.com/docker/distribution/context" + "context" ) // TagService provides access to information about tagged objects. From 860b28c5b9698bb7e9d5e9b9bc83f15319425723 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Mon, 14 Aug 2017 18:30:46 -0700 Subject: [PATCH 052/276] registry: remove dependency on logrus for client To simplify the vendoring story for the client, we have now removed the requirement for `logrus` and the forked `context` package (usually imported as `dcontext`). We inject the logger via the metrics tracker for the blob cache and via options on the token handler. We preserve logs on the proxy cache for that case. Clients expecting these log messages may need to be updated accordingly. Signed-off-by: Stephen J Day --- registry/client/auth/session.go | 21 ++++++++++++--- registry/proxy/proxyregistry.go | 15 ++++++++++- registry/storage/blobcachemetrics.go | 6 +++++ .../cache/cachedblobdescriptorstore.go | 27 ++++++++++++++++--- 4 files changed, 61 insertions(+), 8 deletions(-) diff --git a/registry/client/auth/session.go b/registry/client/auth/session.go index be474d82..bc7b66db 100644 --- a/registry/client/auth/session.go +++ b/registry/client/auth/session.go @@ -13,7 +13,6 @@ import ( "github.com/docker/distribution/registry/client" "github.com/docker/distribution/registry/client/auth/challenge" "github.com/docker/distribution/registry/client/transport" - "github.com/sirupsen/logrus" ) var ( @@ -135,6 +134,8 @@ type tokenHandler struct { tokenLock sync.Mutex tokenCache string tokenExpiration time.Time + + logger Logger } // Scope is a type which is serializable to a string @@ -176,6 +177,18 @@ func (rs RegistryScope) String() string { return fmt.Sprintf("registry:%s:%s", rs.Name, strings.Join(rs.Actions, ",")) } +// Logger defines the injectable logging interface, used on TokenHandlers. +type Logger interface { + Debugf(format string, args ...interface{}) +} + +func logDebugf(logger Logger, format string, args ...interface{}) { + if logger == nil { + return + } + logger.Debugf(format, args...) +} + // TokenHandlerOptions is used to configure a new token handler type TokenHandlerOptions struct { Transport http.RoundTripper @@ -185,6 +198,7 @@ type TokenHandlerOptions struct { ForceOAuth bool ClientID string Scopes []Scope + Logger Logger } // An implementation of clock for providing real time data. 
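The injection point added above is easiest to see from the caller's side. A minimal sketch, assuming the standard import paths; any type with a matching `Debugf` method (such as a `*logrus.Logger`) satisfies the new `Logger` interface:

```go
package main

import (
	"net/http"

	"github.com/docker/distribution/registry/client/auth"
	"github.com/sirupsen/logrus"
)

func main() {
	// Logging is now opt-in: clients that still want the token handler's
	// debug output inject a Logger instead of inheriting a logrus dependency.
	handler := auth.NewTokenHandlerWithOptions(auth.TokenHandlerOptions{
		Transport: http.DefaultTransport,
		Scopes: []auth.Scope{
			// Repository name is illustrative.
			auth.RepositoryScope{Repository: "library/ubuntu", Actions: []string{"pull"}},
		},
		Logger: logrus.StandardLogger(),
	})
	_ = handler // wire into transport.NewTransport(...) as before
}
```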
@@ -220,6 +234,7 @@ func NewTokenHandlerWithOptions(options TokenHandlerOptions) AuthenticationHandl clientID: options.ClientID, scopes: options.Scopes, clock: realClock{}, + logger: options.Logger, } return handler @@ -348,7 +363,7 @@ func (th *tokenHandler) fetchTokenWithOAuth(realm *url.URL, refreshToken, servic if tr.ExpiresIn < minimumTokenLifetimeSeconds { // The default/minimum lifetime. tr.ExpiresIn = minimumTokenLifetimeSeconds - logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn) + logDebugf(th.logger, "Increasing token expiration to: %d seconds", tr.ExpiresIn) } if tr.IssuedAt.IsZero() { @@ -439,7 +454,7 @@ func (th *tokenHandler) fetchTokenWithBasicAuth(realm *url.URL, service string, if tr.ExpiresIn < minimumTokenLifetimeSeconds { // The default/minimum lifetime. tr.ExpiresIn = minimumTokenLifetimeSeconds - logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn) + logDebugf(th.logger, "Increasing token expiration to: %d seconds", tr.ExpiresIn) } if tr.IssuedAt.IsZero() { diff --git a/registry/proxy/proxyregistry.go b/registry/proxy/proxyregistry.go index aec220e8..a6413fda 100644 --- a/registry/proxy/proxyregistry.go +++ b/registry/proxy/proxyregistry.go @@ -121,8 +121,21 @@ func (pr *proxyingRegistry) Repositories(ctx context.Context, repos []string, la func (pr *proxyingRegistry) Repository(ctx context.Context, name reference.Named) (distribution.Repository, error) { c := pr.authChallenger + tkopts := auth.TokenHandlerOptions{ + Transport: http.DefaultTransport, + Credentials: c.credentialStore(), + Scopes: []auth.Scope{ + auth.RepositoryScope{ + Repository: name.Name(), + Actions: []string{"pull"}, + }, + }, + Logger: dcontext.GetLogger(ctx), + } + tr := transport.NewTransport(http.DefaultTransport, - auth.NewAuthorizer(c.challengeManager(), auth.NewTokenHandler(http.DefaultTransport, c.credentialStore(), name.Name(), "pull"))) + auth.NewAuthorizer(c.challengeManager(), + auth.NewTokenHandlerWithOptions(tkopts))) localRepo, err := pr.embedded.Repository(ctx, name) if err != nil { diff --git a/registry/storage/blobcachemetrics.go b/registry/storage/blobcachemetrics.go index fad0a77a..238b5806 100644 --- a/registry/storage/blobcachemetrics.go +++ b/registry/storage/blobcachemetrics.go @@ -1,9 +1,11 @@ package storage import ( + "context" "expvar" "sync/atomic" + dcontext "github.com/docker/distribution/context" "github.com/docker/distribution/registry/storage/cache" ) @@ -25,6 +27,10 @@ func (bsc *blobStatCollector) Metrics() cache.Metrics { return bsc.metrics } +func (bsc *blobStatCollector) Logger(ctx context.Context) cache.Logger { + return dcontext.GetLogger(ctx) +} + // blobStatterCacheMetrics keeps track of cache metrics for blob descriptor // cache requests. Note this is kept globally and made available via expvar. // For more detailed metrics, its recommend to instrument a particular cache diff --git a/registry/storage/cache/cachedblobdescriptorstore.go b/registry/storage/cache/cachedblobdescriptorstore.go index 668eebda..cdc34f5f 100644 --- a/registry/storage/cache/cachedblobdescriptorstore.go +++ b/registry/storage/cache/cachedblobdescriptorstore.go @@ -4,7 +4,6 @@ import ( "context" "github.com/docker/distribution" - dcontext "github.com/docker/distribution/context" "github.com/opencontainers/go-digest" ) @@ -17,12 +16,20 @@ type Metrics struct { Misses uint64 } +// Logger can be provided on the MetricsTracker to log errors. +// +// Usually, this is just a proxy to dcontext.GetLogger. 
+type Logger interface { + Errorf(format string, args ...interface{}) +} + // MetricsTracker represents a metric tracker // which simply counts the number of hits and misses. type MetricsTracker interface { Hit() Miss() Metrics() Metrics + Logger(context.Context) Logger } type cachedBlobStatter struct { @@ -54,7 +61,7 @@ func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (di desc, err := cbds.cache.Stat(ctx, dgst) if err != nil { if err != distribution.ErrBlobUnknown { - dcontext.GetLogger(ctx).Errorf("error retrieving descriptor from cache: %v", err) + logErrorf(ctx, cbds.tracker, "error retrieving descriptor from cache: %v", err) } goto fallback @@ -74,7 +81,7 @@ fallback: } if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { - dcontext.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err) + logErrorf(ctx, cbds.tracker, "error adding descriptor %v to cache: %v", desc.Digest, err) } return desc, err @@ -96,7 +103,19 @@ func (cbds *cachedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) er func (cbds *cachedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { - dcontext.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err) + logErrorf(ctx, cbds.tracker, "error adding descriptor %v to cache: %v", desc.Digest, err) } return nil } + +func logErrorf(ctx context.Context, tracker MetricsTracker, format string, args ...interface{}) { + if tracker == nil { + return + } + + logger := tracker.Logger(ctx) + if logger == nil { + return + } + logger.Errorf(format, args...) +} From 3f771adca694af0597e3f5ee5f771a101fcb3ded Mon Sep 17 00:00:00 2001 From: Leonardo Azize Martins Date: Thu, 17 Aug 2017 08:16:27 +0200 Subject: [PATCH 053/276] Fix registry version argument Signed-off-by: Leonardo Azize Martins --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 1f1441be..afe4e92a 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -51,7 +51,7 @@ By following these simple rules you will get better and faster feedback on your 2. copy the output of: - `docker version` - `docker info` - - `docker exec registry -version` + - `docker exec registry --version` 3. copy the command line you used to launch your Registry 4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments) 5. reproduce your problem and get your docker daemon logs showing the error From 1618b49d5b01aa3a84d948f28f7f796cb6fa1909 Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Mon, 21 Aug 2017 15:04:31 -0700 Subject: [PATCH 054/276] registry/handlers: ignore notfound on storage driver healthcheck Signed-off-by: Stephen J Day --- registry/handlers/app.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/registry/handlers/app.go b/registry/handlers/app.go index 813bae16..e38050cb 100644 --- a/registry/handlers/app.go +++ b/registry/handlers/app.go @@ -346,7 +346,10 @@ func (app *App) RegisterHealthChecks(healthRegistries ...*health.Registry) { storageDriverCheck := func() error { _, err := app.driver.Stat(app, "/") // "/" should always exist - return err // any error will be treated as failure + if _, ok := err.(storagedriver.PathNotFoundError); ok { + err = nil // pass this through, backend is responding, but this path doesn't exist. 
+ } + return err } if app.Config.Health.StorageDriver.Threshold != 0 { From 23f8ca88e1a39f34d7fe84d122eb7897b30f659b Mon Sep 17 00:00:00 2001 From: Clayton Coleman Date: Wed, 23 Aug 2017 19:24:24 -0400 Subject: [PATCH 055/276] If the request already has the scope, don't force token fetch AuthorizeRequest() injects the 'pull' scope if `from` is set unconditionally. If the current token already has that scope, it will be inserted into the scope list twice and `addedScopes` will be set to true, resulting in a new token being fetched that has no net new scopes. Instead, check whether `additionalScopes` are actually new. Signed-off-by: Clayton Coleman --- registry/client/auth/session.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/registry/client/auth/session.go b/registry/client/auth/session.go index bc7b66db..db86c9b0 100644 --- a/registry/client/auth/session.go +++ b/registry/client/auth/session.go @@ -279,6 +279,9 @@ func (th *tokenHandler) getToken(params map[string]string, additionalScopes ...s } var addedScopes bool for _, scope := range additionalScopes { + if hasScope(scopes, scope) { + continue + } scopes = append(scopes, scope) addedScopes = true } @@ -302,6 +305,15 @@ func (th *tokenHandler) getToken(params map[string]string, additionalScopes ...s return th.tokenCache, nil } +func hasScope(scopes []string, scope string) bool { + for _, s := range scopes { + if s == scope { + return true + } + } + return false +} + type postTokenResponse struct { AccessToken string `json:"access_token"` RefreshToken string `json:"refresh_token"` From a2015272c1442aec6e8e3049dd4694a5331e37eb Mon Sep 17 00:00:00 2001 From: Clayton Coleman Date: Fri, 25 Aug 2017 17:15:10 -0400 Subject: [PATCH 056/276] Support HEAD requests without Docker-Content-Digest header A statically hosted registry that responds correctly to GET with a manifest will load the right digest (by looking at the manifest body and calculating the digest). If the registry returns a HEAD without `Docker-Content-Digest`, then the client Tags().Get() call will return an empty digest. This commit changes the client to fallback to loading the tag via GET if the `Docker-Content-Digest` header is not set. Signed-off-by: Clayton Coleman --- registry/client/repository.go | 3 +- registry/client/repository_test.go | 61 ++++++++++++++++++++++++++++++ 2 files changed, 63 insertions(+), 1 deletion(-) diff --git a/registry/client/repository.go b/registry/client/repository.go index 8bd2c3fb..9846c42c 100644 --- a/registry/client/repository.go +++ b/registry/client/repository.go @@ -321,7 +321,8 @@ func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, er defer resp.Body.Close() switch { - case resp.StatusCode >= 200 && resp.StatusCode < 400: + case resp.StatusCode >= 200 && resp.StatusCode < 400 && len(resp.Header.Get("Docker-Content-Digest")) > 0: + // if the response is a success AND a Docker-Content-Digest can be retrieved from the headers return descriptorFromResponse(resp) default: // if the response is an error - there will be no body to decode. 
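To make the fallback concrete, a sketch of the caller-visible behavior (registry URL and tag are hypothetical; at this point in the series `NewRepository` still takes a context, which patch 057 below removes):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/client"
)

func main() {
	ctx := context.Background()
	repo, _ := reference.WithName("example.com/static/repo") // hypothetical
	r, err := client.NewRepository(ctx, repo, "https://example.com", nil)
	if err != nil {
		log.Fatal(err)
	}
	// HEAD is tried first; if the response lacks Docker-Content-Digest,
	// the client now retries with GET and computes the digest from the
	// manifest body instead of returning an empty descriptor.
	desc, err := r.Tags(ctx).Get(ctx, "1.0.0")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(desc.Digest)
}
```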
diff --git a/registry/client/repository_test.go b/registry/client/repository_test.go index f22fa33d..45ef24cd 100644 --- a/registry/client/repository_test.go +++ b/registry/client/repository_test.go @@ -647,7 +647,38 @@ func addTestManifest(repo reference.Named, reference string, mediatype string, c }), }, }) +} +func addTestManifestWithoutDigestHeader(repo reference.Named, reference string, mediatype string, content []byte, m *testutil.RequestResponseMap) { + *m = append(*m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + repo.Name() + "/manifests/" + reference, + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: content, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + "Content-Type": {mediatype}, + }), + }, + }) + *m = append(*m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "HEAD", + Route: "/v2/" + repo.Name() + "/manifests/" + reference, + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + "Content-Type": {mediatype}, + }), + }, + }) } func checkEqualManifest(m1, m2 *schema1.SignedManifest) error { @@ -994,6 +1025,36 @@ func TestObtainsErrorForMissingTag(t *testing.T) { } } +func TestObtainsManifestForTagWithoutHeaders(t *testing.T) { + repo, _ := reference.WithName("test.example.com/repo") + + var m testutil.RequestResponseMap + m1, dgst, _ := newRandomSchemaV1Manifest(repo, "latest", 6) + _, pl, err := m1.Payload() + if err != nil { + t.Fatal(err) + } + addTestManifestWithoutDigestHeader(repo, "1.0.0", schema1.MediaTypeSignedManifest, pl, &m) + + e, c := testServer(m) + defer c() + + ctx := context.Background() + r, err := NewRepository(ctx, repo, e, nil) + if err != nil { + t.Fatal(err) + } + + tagService := r.Tags(ctx) + + desc, err := tagService.Get(ctx, "1.0.0") + if err != nil { + t.Fatalf("Expected no error") + } + if desc.Digest != dgst { + t.Fatalf("Unexpected digest") + } +} func TestManifestTagsPaginated(t *testing.T) { s := httptest.NewServer(http.NotFoundHandler()) defer s.Close() From 2c58ce1a7f72585aafebc3faf85582fe43280bd3 Mon Sep 17 00:00:00 2001 From: Tibor Vass Date: Sat, 2 Sep 2017 08:37:34 -0700 Subject: [PATCH 057/276] Remove context in NewRegistry and NewRepository The context parameter was either not used or misused. 
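The resulting API change, sketched for a hypothetical caller: the context now travels with each method call rather than being captured at construction time.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/client"
)

func main() {
	name, _ := reference.WithName("example.com/repo") // hypothetical
	// Before: client.NewRepository(ctx, name, baseURL, transport)
	// After:  the stored context is gone from the constructor.
	r, err := client.NewRepository(name, "https://example.com", nil)
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	tags, err := r.Tags(ctx).All(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(tags)
}
```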
Signed-off-by: Tibor Vass --- registry/client/repository.go | 8 ++----- registry/client/repository_test.go | 36 +++++++++++++++--------------- registry/proxy/proxyregistry.go | 2 +- 3 files changed, 21 insertions(+), 25 deletions(-) diff --git a/registry/client/repository.go b/registry/client/repository.go index 9846c42c..fc4edab9 100644 --- a/registry/client/repository.go +++ b/registry/client/repository.go @@ -62,7 +62,7 @@ func checkHTTPRedirect(req *http.Request, via []*http.Request) error { } // NewRegistry creates a registry namespace which can be used to get a listing of repositories -func NewRegistry(ctx context.Context, baseURL string, transport http.RoundTripper) (Registry, error) { +func NewRegistry(baseURL string, transport http.RoundTripper) (Registry, error) { ub, err := v2.NewURLBuilderFromString(baseURL, false) if err != nil { return nil, err @@ -77,7 +77,6 @@ func NewRegistry(ctx context.Context, baseURL string, transport http.RoundTrippe return ®istry{ client: client, ub: ub, - context: ctx, }, nil } @@ -133,7 +132,7 @@ func (r *registry) Repositories(ctx context.Context, entries []string, last stri } // NewRepository creates a new Repository for the given repository name and base URL. -func NewRepository(ctx context.Context, name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { +func NewRepository(name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { ub, err := v2.NewURLBuilderFromString(baseURL, false) if err != nil { return nil, err @@ -149,7 +148,6 @@ func NewRepository(ctx context.Context, name reference.Named, baseURL string, tr client: client, ub: ub, name: name, - context: ctx, }, nil } @@ -192,7 +190,6 @@ func (r *repository) Tags(ctx context.Context) distribution.TagService { return &tags{ client: r.client, ub: r.ub, - context: r.context, name: r.Named(), } } @@ -201,7 +198,6 @@ func (r *repository) Tags(ctx context.Context) distribution.TagService { type tags struct { client *http.Client ub *v2.URLBuilder - context context.Context name reference.Named } diff --git a/registry/client/repository_test.go b/registry/client/repository_test.go index 45ef24cd..e5a6dad0 100644 --- a/registry/client/repository_test.go +++ b/registry/client/repository_test.go @@ -118,7 +118,7 @@ func TestBlobDelete(t *testing.T) { defer c() ctx := context.Background() - r, err := NewRepository(ctx, repo, e, nil) + r, err := NewRepository(repo, e, nil) if err != nil { t.Fatal(err) } @@ -140,7 +140,7 @@ func TestBlobFetch(t *testing.T) { ctx := context.Background() repo, _ := reference.WithName("test.example.com/repo1") - r, err := NewRepository(ctx, repo, e, nil) + r, err := NewRepository(repo, e, nil) if err != nil { t.Fatal(err) } @@ -194,7 +194,7 @@ func TestBlobExistsNoContentLength(t *testing.T) { defer c() ctx := context.Background() - r, err := NewRepository(ctx, repo, e, nil) + r, err := NewRepository(repo, e, nil) if err != nil { t.Fatal(err) } @@ -220,7 +220,7 @@ func TestBlobExists(t *testing.T) { ctx := context.Background() repo, _ := reference.WithName("test.example.com/repo1") - r, err := NewRepository(ctx, repo, e, nil) + r, err := NewRepository(repo, e, nil) if err != nil { t.Fatal(err) } @@ -325,7 +325,7 @@ func TestBlobUploadChunked(t *testing.T) { defer c() ctx := context.Background() - r, err := NewRepository(ctx, repo, e, nil) + r, err := NewRepository(repo, e, nil) if err != nil { t.Fatal(err) } @@ -435,7 +435,7 @@ func TestBlobUploadMonolithic(t *testing.T) { defer c() ctx 
:= context.Background() - r, err := NewRepository(ctx, repo, e, nil) + r, err := NewRepository(repo, e, nil) if err != nil { t.Fatal(err) } @@ -512,7 +512,7 @@ func TestBlobMount(t *testing.T) { defer c() ctx := context.Background() - r, err := NewRepository(ctx, repo, e, nil) + r, err := NewRepository(repo, e, nil) if err != nil { t.Fatal(err) } @@ -723,7 +723,7 @@ func TestV1ManifestFetch(t *testing.T) { e, c := testServer(m) defer c() - r, err := NewRepository(context.Background(), repo, e, nil) + r, err := NewRepository(repo, e, nil) if err != nil { t.Fatal(err) } @@ -795,7 +795,7 @@ func TestManifestFetchWithEtag(t *testing.T) { defer c() ctx := context.Background() - r, err := NewRepository(ctx, repo, e, nil) + r, err := NewRepository(repo, e, nil) if err != nil { t.Fatal(err) } @@ -836,7 +836,7 @@ func TestManifestDelete(t *testing.T) { e, c := testServer(m) defer c() - r, err := NewRepository(context.Background(), repo, e, nil) + r, err := NewRepository(repo, e, nil) if err != nil { t.Fatal(err) } @@ -899,7 +899,7 @@ func TestManifestPut(t *testing.T) { e, c := testServer(m) defer c() - r, err := NewRepository(context.Background(), repo, e, nil) + r, err := NewRepository(repo, e, nil) if err != nil { t.Fatal(err) } @@ -952,7 +952,7 @@ func TestManifestTags(t *testing.T) { e, c := testServer(m) defer c() - r, err := NewRepository(context.Background(), repo, e, nil) + r, err := NewRepository(repo, e, nil) if err != nil { t.Fatal(err) } @@ -1009,7 +1009,7 @@ func TestObtainsErrorForMissingTag(t *testing.T) { defer c() ctx := context.Background() - r, err := NewRepository(ctx, repo, e, nil) + r, err := NewRepository(repo, e, nil) if err != nil { t.Fatal(err) } @@ -1040,7 +1040,7 @@ func TestObtainsManifestForTagWithoutHeaders(t *testing.T) { defer c() ctx := context.Background() - r, err := NewRepository(ctx, repo, e, nil) + r, err := NewRepository(repo, e, nil) if err != nil { t.Fatal(err) } @@ -1098,7 +1098,7 @@ func TestManifestTagsPaginated(t *testing.T) { s.Config.Handler = testutil.NewHandler(m) - r, err := NewRepository(context.Background(), repo, s.URL, nil) + r, err := NewRepository(repo, s.URL, nil) if err != nil { t.Fatal(err) } @@ -1146,7 +1146,7 @@ func TestManifestUnauthorized(t *testing.T) { e, c := testServer(m) defer c() - r, err := NewRepository(context.Background(), repo, e, nil) + r, err := NewRepository(repo, e, nil) if err != nil { t.Fatal(err) } @@ -1183,7 +1183,7 @@ func TestCatalog(t *testing.T) { entries := make([]string, 5) - r, err := NewRegistry(context.Background(), e, nil) + r, err := NewRegistry(e, nil) if err != nil { t.Fatal(err) } @@ -1215,7 +1215,7 @@ func TestCatalogInParts(t *testing.T) { entries := make([]string, 2) - r, err := NewRegistry(context.Background(), e, nil) + r, err := NewRegistry(e, nil) if err != nil { t.Fatal(err) } diff --git a/registry/proxy/proxyregistry.go b/registry/proxy/proxyregistry.go index a6413fda..e8a8f352 100644 --- a/registry/proxy/proxyregistry.go +++ b/registry/proxy/proxyregistry.go @@ -146,7 +146,7 @@ func (pr *proxyingRegistry) Repository(ctx context.Context, name reference.Named return nil, err } - remoteRepo, err := client.NewRepository(ctx, name, pr.remoteURL.String(), tr) + remoteRepo, err := client.NewRepository(name, pr.remoteURL.String(), tr) if err != nil { return nil, err } From 13e0608bc850f8d5b8f4fa0ed57c7c3f3c8035d2 Mon Sep 17 00:00:00 2001 From: Tibor Vass Date: Tue, 5 Sep 2017 13:14:43 -0700 Subject: [PATCH 058/276] gofmt -w -s registry/client/repository.go Signed-off-by: Tibor Vass --- 
registry/client/repository.go | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/registry/client/repository.go b/registry/client/repository.go index fc4edab9..f9499552 100644 --- a/registry/client/repository.go +++ b/registry/client/repository.go @@ -75,8 +75,8 @@ func NewRegistry(baseURL string, transport http.RoundTripper) (Registry, error) } return ®istry{ - client: client, - ub: ub, + client: client, + ub: ub, }, nil } @@ -145,9 +145,9 @@ func NewRepository(name reference.Named, baseURL string, transport http.RoundTri } return &repository{ - client: client, - ub: ub, - name: name, + client: client, + ub: ub, + name: name, }, nil } @@ -188,17 +188,17 @@ func (r *repository) Manifests(ctx context.Context, options ...distribution.Mani func (r *repository) Tags(ctx context.Context) distribution.TagService { return &tags{ - client: r.client, - ub: r.ub, - name: r.Named(), + client: r.client, + ub: r.ub, + name: r.Named(), } } // tags implements remote tagging operations. type tags struct { - client *http.Client - ub *v2.URLBuilder - name reference.Named + client *http.Client + ub *v2.URLBuilder + name reference.Named } // All returns all tags From 4abf680c76151da0f4a9086d44abad655cb1f01e Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Fri, 29 Sep 2017 13:27:49 -0700 Subject: [PATCH 059/276] registry: feed the linter by removing redundant err check Signed-off-by: Stephen J Day --- registry/api/errcode/handler.go | 6 +----- registry/proxy/proxyauth.go | 6 +----- registry/storage/blobwriter.go | 6 +----- 3 files changed, 3 insertions(+), 15 deletions(-) diff --git a/registry/api/errcode/handler.go b/registry/api/errcode/handler.go index 49a64a86..d77e7047 100644 --- a/registry/api/errcode/handler.go +++ b/registry/api/errcode/handler.go @@ -36,9 +36,5 @@ func ServeJSON(w http.ResponseWriter, err error) error { w.WriteHeader(sc) - if err := json.NewEncoder(w).Encode(err); err != nil { - return err - } - - return nil + return json.NewEncoder(w).Encode(err) } diff --git a/registry/proxy/proxyauth.go b/registry/proxy/proxyauth.go index 7b405afc..09a0973e 100644 --- a/registry/proxy/proxyauth.go +++ b/registry/proxy/proxyauth.go @@ -79,9 +79,5 @@ func ping(manager challenge.Manager, endpoint, versionHeader string) error { } defer resp.Body.Close() - if err := manager.AddResponse(resp); err != nil { - return err - } - - return nil + return manager.AddResponse(resp) } diff --git a/registry/storage/blobwriter.go b/registry/storage/blobwriter.go index d56f2874..2bb25dcc 100644 --- a/registry/storage/blobwriter.go +++ b/registry/storage/blobwriter.go @@ -104,11 +104,7 @@ func (bw *blobWriter) Cancel(ctx context.Context) error { dcontext.GetLogger(ctx).Errorf("error closing blobwriter: %s", err) } - if err := bw.removeResources(ctx); err != nil { - return err - } - - return nil + return bw.removeResources(ctx) } func (bw *blobWriter) Size() int64 { From 3c5f85abd10c949a238e89efef667c3a40b17303 Mon Sep 17 00:00:00 2001 From: Clayton Coleman Date: Tue, 19 Sep 2017 21:25:37 -0400 Subject: [PATCH 060/276] Allow clients to request specific manifest media types The current registry/client sends the registered manifest types in random order. Allow clients to request a single specific manifest type or a preferred order as per the HTTP spec. 
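A sketch of the new option from a caller's perspective (the helper function is ours, not part of the patch); passing a single media type yields a single Accept value:

```go
package example

import (
	"context"

	"github.com/docker/distribution"
	"github.com/docker/distribution/manifest/schema2"
	"github.com/opencontainers/go-digest"
)

// fetchSchema2 requests exactly one manifest media type, so the Accept
// header carries one value instead of the full registered set.
func fetchSchema2(ctx context.Context, ms distribution.ManifestService, dgst digest.Digest) (distribution.Manifest, error) {
	return ms.Get(ctx, dgst, distribution.WithManifestMediaTypes([]string{
		schema2.MediaTypeManifest,
	}))
}
```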
Signed-off-by: Clayton Coleman --- registry.go | 15 ++++++++ registry/client/repository.go | 16 ++++++-- registry/client/repository_test.go | 61 ++++++++++++++++++++++++++++++ 3 files changed, 88 insertions(+), 4 deletions(-) diff --git a/registry.go b/registry.go index c34207d0..a3a80ab8 100644 --- a/registry.go +++ b/registry.go @@ -73,6 +73,21 @@ func (o WithTagOption) Apply(m ManifestService) error { return nil } +// WithManifestMediaTypes lists the media types the client wishes +// the server to provide. +func WithManifestMediaTypes(mediaTypes []string) ManifestServiceOption { + return WithManifestMediaTypesOption{mediaTypes} +} + +// WithManifestMediaTypesOption holds a list of accepted media types +type WithManifestMediaTypesOption struct{ MediaTypes []string } + +// Apply conforms to the ManifestServiceOption interface +func (o WithManifestMediaTypesOption) Apply(m ManifestService) error { + // no implementation + return nil +} + // Repository is a named collection of manifests and layers. type Repository interface { // Named returns the name of the repository. diff --git a/registry/client/repository.go b/registry/client/repository.go index f9499552..ed29cf3f 100644 --- a/registry/client/repository.go +++ b/registry/client/repository.go @@ -418,18 +418,22 @@ func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...dis ref reference.Named err error contentDgst *digest.Digest + mediaTypes []string ) for _, option := range options { - if opt, ok := option.(distribution.WithTagOption); ok { + switch opt := option.(type) { + case distribution.WithTagOption: digestOrTag = opt.Tag ref, err = reference.WithTag(ms.name, opt.Tag) if err != nil { return nil, err } - } else if opt, ok := option.(contentDigestOption); ok { + case contentDigestOption: contentDgst = opt.digest - } else { + case distribution.WithManifestMediaTypesOption: + mediaTypes = opt.MediaTypes + default: err := option.Apply(ms) if err != nil { return nil, err @@ -445,6 +449,10 @@ func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...dis } } + if len(mediaTypes) == 0 { + mediaTypes = distribution.ManifestMediaTypes() + } + u, err := ms.ub.BuildManifestURL(ref) if err != nil { return nil, err @@ -455,7 +463,7 @@ func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...dis return nil, err } - for _, t := range distribution.ManifestMediaTypes() { + for _, t := range mediaTypes { req.Header.Add("Accept", t) } diff --git a/registry/client/repository_test.go b/registry/client/repository_test.go index e5a6dad0..3bcf63e1 100644 --- a/registry/client/repository_test.go +++ b/registry/client/repository_test.go @@ -9,6 +9,8 @@ import ( "log" "net/http" "net/http/httptest" + "reflect" + "sort" "strconv" "strings" "testing" @@ -815,6 +817,65 @@ func TestManifestFetchWithEtag(t *testing.T) { } } +func TestManifestFetchWithAccept(t *testing.T) { + ctx := context.Background() + repo, _ := reference.WithName("test.example.com/repo") + _, dgst, _ := newRandomSchemaV1Manifest(repo, "latest", 6) + headers := make(chan []string, 1) + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + headers <- req.Header["Accept"] + })) + defer close(headers) + defer s.Close() + + r, err := NewRepository(repo, s.URL, nil) + if err != nil { + t.Fatal(err) + } + ms, err := r.Manifests(ctx) + if err != nil { + t.Fatal(err) + } + + testCases := []struct { + // the media types we send + mediaTypes []string + // the expected Accept headers the server should receive 
+ expect []string + // whether to sort the request and response values for comparison + sort bool + }{ + { + mediaTypes: []string{}, + expect: distribution.ManifestMediaTypes(), + sort: true, + }, + { + mediaTypes: []string{"test1", "test2"}, + expect: []string{"test1", "test2"}, + }, + { + mediaTypes: []string{"test1"}, + expect: []string{"test1"}, + }, + { + mediaTypes: []string{""}, + expect: []string{""}, + }, + } + for _, testCase := range testCases { + ms.Get(ctx, dgst, distribution.WithManifestMediaTypes(testCase.mediaTypes)) + actual := <-headers + if testCase.sort { + sort.Strings(actual) + sort.Strings(testCase.expect) + } + if !reflect.DeepEqual(actual, testCase.expect) { + t.Fatalf("unexpected Accept header values: %v", actual) + } + } +} + func TestManifestDelete(t *testing.T) { repo, _ := reference.WithName("test.example.com/repo/delete") _, dgst1, _ := newRandomSchemaV1Manifest(repo, "latest", 6) From d66208108d06ce9d7154ca3d39eb8021f5e4e8f9 Mon Sep 17 00:00:00 2001 From: zhouhaibing089 Date: Sat, 14 Oct 2017 12:31:21 +0800 Subject: [PATCH 061/276] cmd/digest: import crypto algorithms The digest CLI does not work if we do not import these two packages (tested in go1.9). Basically, we have to make several algorithms available by calling crypto.RegisterHash in init functions. Signed-off-by: zhouhaibing089 --- cmd/digest/main.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cmd/digest/main.go b/cmd/digest/main.go index 20f64ddb..308be461 100644 --- a/cmd/digest/main.go +++ b/cmd/digest/main.go @@ -9,6 +9,9 @@ import ( "github.com/docker/distribution/version" "github.com/opencontainers/go-digest" + + _ "crypto/sha256" + _ "crypto/sha512" ) var ( From fda42e5ef908bdba722d435ff1f330d40dfcd56c Mon Sep 17 00:00:00 2001 From: Misty Stanley-Jones Date: Fri, 20 Oct 2017 11:16:21 -0700 Subject: [PATCH 062/276] Fix keyword format for downstream docs Signed-off-by: Misty Stanley-Jones --- docs/spec/api.md | 2 +- docs/spec/api.md.tmpl | 2 +- docs/spec/auth/index.md | 2 +- docs/spec/auth/jwt.md | 2 +- docs/spec/auth/oauth.md | 2 +- docs/spec/auth/scope.md | 2 +- docs/spec/auth/token.md | 2 +- docs/spec/index.md | 2 +- docs/spec/manifest-v2-1.md | 2 +- docs/spec/manifest-v2-2.md | 2 +- docs/spec/menu.md | 2 +- 11 files changed, 11 insertions(+), 11 deletions(-) diff --git a/docs/spec/api.md b/docs/spec/api.md index 29dc51e5..0a2fc5e5 100644 --- a/docs/spec/api.md +++ b/docs/spec/api.md @@ -1,7 +1,7 @@ --- title: "HTTP API V2" description: "Specification for the Registry API." -keywords: ["registry, on-prem, images, tags, repository, distribution, api, advanced"] +keywords: registry, on-prem, images, tags, repository, distribution, api, advanced --- # Docker Registry HTTP API V2 diff --git a/docs/spec/api.md.tmpl b/docs/spec/api.md.tmpl index 4338b057..79879ad6 100644 --- a/docs/spec/api.md.tmpl +++ b/docs/spec/api.md.tmpl @@ -1,7 +1,7 @@ --- title: "HTTP API V2" description: "Specification for the Registry API."
-keywords: ["registry, on-prem, images, tags, repository, distribution, api, advanced"] +keywords: registry, on-prem, images, tags, repository, distribution, api, advanced --- # Docker Registry HTTP API V2 diff --git a/docs/spec/auth/index.md b/docs/spec/auth/index.md index d1aa9422..8c4bf5e2 100644 --- a/docs/spec/auth/index.md +++ b/docs/spec/auth/index.md @@ -1,7 +1,7 @@ --- title: "Docker Registry Token Authentication" description: "Docker Registry v2 authentication schema" -keywords: ["registry, on-prem, images, tags, repository, distribution, authentication, advanced"] +keywords: registry, on-prem, images, tags, repository, distribution, authentication, advanced --- # Docker Registry v2 authentication diff --git a/docs/spec/auth/jwt.md b/docs/spec/auth/jwt.md index aa9941b0..ef729efe 100644 --- a/docs/spec/auth/jwt.md +++ b/docs/spec/auth/jwt.md @@ -1,7 +1,7 @@ --- title: "Token Authentication Implementation" description: "Describe the reference implementation of the Docker Registry v2 authentication schema" -keywords: ["registry, on-prem, images, tags, repository, distribution, JWT authentication, advanced"] +keywords: registry, on-prem, images, tags, repository, distribution, JWT authentication, advanced --- # Docker Registry v2 Bearer token specification diff --git a/docs/spec/auth/oauth.md b/docs/spec/auth/oauth.md index d946da8a..f4fdec81 100644 --- a/docs/spec/auth/oauth.md +++ b/docs/spec/auth/oauth.md @@ -1,7 +1,7 @@ --- title: "Oauth2 Token Authentication" description: "Specifies the Docker Registry v2 authentication" -keywords: ["registry, on-prem, images, tags, repository, distribution, oauth2, advanced"] +keywords: registry, on-prem, images, tags, repository, distribution, oauth2, advanced --- # Docker Registry v2 authentication using OAuth2 diff --git a/docs/spec/auth/scope.md b/docs/spec/auth/scope.md index 6ef61edf..037fd676 100644 --- a/docs/spec/auth/scope.md +++ b/docs/spec/auth/scope.md @@ -1,7 +1,7 @@ --- title: "Token Scope Documentation" description: "Describes the scope and access fields used for registry authorization tokens" -keywords: ["registry, on-prem, images, tags, repository, distribution, advanced, access, scope"] +keywords: registry, on-prem, images, tags, repository, distribution, advanced, access, scope --- # Docker Registry Token Scope and Access diff --git a/docs/spec/auth/token.md b/docs/spec/auth/token.md index f5441980..97f1971d 100644 --- a/docs/spec/auth/token.md +++ b/docs/spec/auth/token.md @@ -1,7 +1,7 @@ --- title: "Token Authentication Specification" description: "Specifies the Docker Registry v2 authentication" -keywords: ["registry, on-prem, images, tags, repository, distribution, Bearer authentication, advanced"] +keywords: registry, on-prem, images, tags, repository, distribution, Bearer authentication, advanced --- # Docker Registry v2 authentication via central service diff --git a/docs/spec/index.md b/docs/spec/index.md index 952ebabd..f397628d 100644 --- a/docs/spec/index.md +++ b/docs/spec/index.md @@ -1,7 +1,7 @@ --- title: "Reference Overview" description: "Explains registry JSON objects" -keywords: ["registry, service, images, repository, json"] +keywords: registry, service, images, repository, json --- # Docker Registry Reference diff --git a/docs/spec/manifest-v2-1.md b/docs/spec/manifest-v2-1.md index 1fadc25a..335509b0 100644 --- a/docs/spec/manifest-v2-1.md +++ b/docs/spec/manifest-v2-1.md @@ -1,7 +1,7 @@ --- title: "Image Manifest V 2, Schema 1 " description: "image manifest for the Registry." 
-keywords: ["registry, on-prem, images, tags, repository, distribution, api, advanced, manifest"] +keywords: registry, on-prem, images, tags, repository, distribution, api, advanced, manifest --- # Image Manifest Version 2, Schema 1 diff --git a/docs/spec/manifest-v2-2.md b/docs/spec/manifest-v2-2.md index 2319cf6d..3d646a70 100644 --- a/docs/spec/manifest-v2-2.md +++ b/docs/spec/manifest-v2-2.md @@ -1,7 +1,7 @@ --- title: "Image Manifest V 2, Schema 2 " description: "image manifest for the Registry." -keywords: ["registry, on-prem, images, tags, repository, distribution, api, advanced, manifest"] +keywords: registry, on-prem, images, tags, repository, distribution, api, advanced, manifest --- # Image Manifest Version 2, Schema 2 diff --git a/docs/spec/menu.md b/docs/spec/menu.md index 9237e3ce..7e52d8d7 100644 --- a/docs/spec/menu.md +++ b/docs/spec/menu.md @@ -1,7 +1,7 @@ --- title: "Reference" description: "Explains registry JSON objects" -keywords: ["registry, service, images, repository, json"] +keywords: registry, service, images, repository, json type: "menu" identifier: "smn_registry_ref" --- From f74613907d5d633b135d17520554bf27eda09bab Mon Sep 17 00:00:00 2001 From: zhouhaibing089 Date: Tue, 31 Oct 2017 16:33:36 +0800 Subject: [PATCH 063/276] digestset: refine some words on unit test 1. when lookup an entry which is missing, it should say NotFound. 2. when add duplicated entry, the entries size should be increased. 3. when add entry which has different algorithm, it should be allowed. Signed-off-by: zhouhaibing089 --- digestset/set_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/digestset/set_test.go b/digestset/set_test.go index 89c5729d..67d46c62 100644 --- a/digestset/set_test.go +++ b/digestset/set_test.go @@ -51,7 +51,7 @@ func TestLookup(t *testing.T) { dgst, err = dset.Lookup("9876") if err == nil { - t.Fatal("Expected ambiguous error looking up: 9876") + t.Fatal("Expected not found error looking up: 9876") } if err != ErrDigestNotFound { t.Fatal(err) @@ -118,7 +118,7 @@ func TestAddDuplication(t *testing.T) { } if len(dset.entries) != 8 { - t.Fatal("Duplicate digest insert allowed") + t.Fatal("Duplicate digest insert should not increase entries size") } if err := dset.Add(digest.Digest("sha384:123451111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111")); err != nil { @@ -126,7 +126,7 @@ func TestAddDuplication(t *testing.T) { } if len(dset.entries) != 9 { - t.Fatal("Insert with different algorithm not allowed") + t.Fatal("Insert with different algorithm should be allowed") } } From c785740af7dd309838139d0798f129d9d372a427 Mon Sep 17 00:00:00 2001 From: Liron Levin Date: Sun, 13 Aug 2017 08:56:11 +0300 Subject: [PATCH 064/276] Create and populate htpasswd file if missing If htpasswd authentication option is configured but the htpasswd file is missing, populate it with a default user and automatically generated password. The password will be printed to stdout. 
Signed-off-by: Liron Levin --- cmd/registry/config-example.yml | 4 +++ docs/configuration.md | 4 +++ registry/auth/htpasswd/access.go | 51 ++++++++++++++++++++++++--- registry/auth/htpasswd/access_test.go | 40 +++++++++++++++++++++ registry/handlers/app.go | 2 +- 5 files changed, 96 insertions(+), 5 deletions(-) diff --git a/cmd/registry/config-example.yml b/cmd/registry/config-example.yml index 3277f9a2..c760cd56 100644 --- a/cmd/registry/config-example.yml +++ b/cmd/registry/config-example.yml @@ -11,6 +11,10 @@ http: addr: :5000 headers: X-Content-Type-Options: [nosniff] +auth: + htpasswd: + realm: basic-realm + path: /etc/registry health: storagedriver: enabled: true diff --git a/docs/configuration.md b/docs/configuration.md index c7f9023f..9bf89d8c 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -553,6 +553,7 @@ The `auth` option is **optional**. Possible auth providers include: - [`silly`](#silly) - [`token`](#token) - [`htpasswd`](#htpasswd) +- [`none`] You can configure only one authentication provider. @@ -598,6 +599,9 @@ The only supported password format is are ignored. The `htpasswd` file is loaded once, at startup. If the file is invalid, the registry will display an error and will not start. +> **Warning**: If the `htpasswd` file is missing, the file will be created and provisioned with a default user and automatically generated password. +> The password will be printed to stdout. + > **Warning**: Only use the `htpasswd` authentication scheme with TLS > configured, since basic authentication sends passwords as part of the HTTP > header. diff --git a/registry/auth/htpasswd/access.go b/registry/auth/htpasswd/access.go index 8f40913b..034c503a 100644 --- a/registry/auth/htpasswd/access.go +++ b/registry/auth/htpasswd/access.go @@ -7,9 +7,13 @@ package htpasswd import ( "context" + "crypto/rand" + "encoding/base64" "fmt" + "golang.org/x/crypto/bcrypt" "net/http" "os" + "path/filepath" "sync" "time" @@ -33,12 +37,15 @@ func newAccessController(options map[string]interface{}) (auth.AccessController, return nil, fmt.Errorf(`"realm" must be set for htpasswd access controller`) } - path, present := options["path"] - if _, ok := path.(string); !present || !ok { + pathOpt, present := options["path"] + path, ok := pathOpt.(string) + if !present || !ok { return nil, fmt.Errorf(`"path" must be set for htpasswd access controller`) } - - return &accessController{realm: realm.(string), path: path.(string)}, nil + if err := createHtpasswdFile(path); err != nil { + return nil, err + } + return &accessController{realm: realm.(string), path: path}, nil } func (ac *accessController) Authorized(ctx context.Context, accessRecords ...auth.Access) (context.Context, error) { @@ -111,6 +118,42 @@ func (ch challenge) Error() string { return fmt.Sprintf("basic authentication challenge for realm %q: %s", ch.realm, ch.err) } +// createHtpasswdFile creates and populates htpasswd file with a new user in case the file is missing +func createHtpasswdFile(path string) error { + if f, err := os.Open(path); err == nil { + f.Close() + return nil + } else if !os.IsNotExist(err) { + return err + } + + if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil { + return err + } + f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0600) + if err != nil { + return fmt.Errorf("failed to open htpasswd path %s", err) + } + defer f.Close() + var secretBytes [32]byte + if _, err := rand.Read(secretBytes[:]); err != nil { + return err + } + pass := base64.RawURLEncoding.EncodeToString(secretBytes[:]) + 
encryptedPass, err := bcrypt.GenerateFromPassword([]byte(pass), bcrypt.DefaultCost) + if err != nil { + return err + } + if _, err := f.Write([]byte(fmt.Sprintf("docker:%s", string(encryptedPass[:])))); err != nil { + return err + } + dcontext.GetLoggerWithFields(context.Background(), map[interface{}]interface{}{ + "user": "docker", + "password": pass, + }).Warnf("htpasswd is missing, provisioning with default user") + return nil +} + func init() { auth.Register("htpasswd", auth.InitFunc(newAccessController)) } diff --git a/registry/auth/htpasswd/access_test.go b/registry/auth/htpasswd/access_test.go index 553f05cf..7a3d411e 100644 --- a/registry/auth/htpasswd/access_test.go +++ b/registry/auth/htpasswd/access_test.go @@ -1,9 +1,11 @@ package htpasswd import ( + "bytes" "io/ioutil" "net/http" "net/http/httptest" + "os" "testing" "github.com/docker/distribution/context" @@ -120,3 +122,41 @@ func TestBasicAccessController(t *testing.T) { } } + +func TestCreateHtpasswdFile(t *testing.T) { + tempFile, err := ioutil.TempFile("", "htpasswd-test") + if err != nil { + t.Fatalf("could not create temporary htpasswd file %v", err) + } + defer tempFile.Close() + options := map[string]interface{}{ + "realm": "/auth/htpasswd", + "path": tempFile.Name(), + } + // Ensure file is not populated + if _, err := newAccessController(options); err != nil { + t.Fatalf("error creating access controller %v", err) + } + content, err := ioutil.ReadAll(tempFile) + if err != nil { + t.Fatalf("failed to read file %v", err) + } + if !bytes.Equal([]byte{}, content) { + t.Fatalf("htpasswd file should not be populated %v", string(content)) + } + if err := os.Remove(tempFile.Name()); err != nil { + t.Fatalf("failed to remove temp file %v", err) + } + + // Ensure htpasswd file is populated + if _, err := newAccessController(options); err != nil { + t.Fatalf("error creating access controller %v", err) + } + content, err = ioutil.ReadFile(tempFile.Name()) + if err != nil { + t.Fatalf("failed to read file %v", err) + } + if !bytes.HasPrefix(content, []byte("docker:$2a$")) { + t.Fatalf("failed to find default user in file %s", string(content)) + } +} diff --git a/registry/handlers/app.go b/registry/handlers/app.go index e38050cb..cfbb8d5f 100644 --- a/registry/handlers/app.go +++ b/registry/handlers/app.go @@ -300,7 +300,7 @@ func NewApp(ctx context.Context, config *configuration.Configuration) *App { authType := config.Auth.Type() - if authType != "" { + if authType != "" && !strings.EqualFold(authType, "none") { accessController, err := auth.GetAccessController(config.Auth.Type(), config.Auth.Parameters()) if err != nil { panic(fmt.Sprintf("unable to configure authorization (%s): %v", authType, err)) From 1a860d8c191d25dbbbfe69c2c604ea12b92e4fc4 Mon Sep 17 00:00:00 2001 From: gbarr01 Date: Thu, 9 Nov 2017 14:15:00 -0800 Subject: [PATCH 065/276] Add text about win os version ordering --- docs/spec/manifest-v2-2.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/spec/manifest-v2-2.md b/docs/spec/manifest-v2-2.md index 2319cf6d..689d9e83 100644 --- a/docs/spec/manifest-v2-2.md +++ b/docs/spec/manifest-v2-2.md @@ -98,6 +98,9 @@ image manifest based on the Content-Type returned in the HTTP response. The optional os.version field specifies the operating system version, for example `10.0.10586`. + Windows versions are ordered so that the first compatible image is + at the top of the list. 
+ - **`os.features`** *array* The optional os.features field specifies an array of strings, From e5b5e44386f7964a1f010a2b8b7687d073215bbb Mon Sep 17 00:00:00 2001 From: gbarr01 Date: Thu, 9 Nov 2017 14:49:04 -0800 Subject: [PATCH 066/276] Revert "Add text about win os version ordering" This reverts commit 1a860d8c191d25dbbbfe69c2c604ea12b92e4fc4. --- docs/spec/manifest-v2-2.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/docs/spec/manifest-v2-2.md b/docs/spec/manifest-v2-2.md index 689d9e83..2319cf6d 100644 --- a/docs/spec/manifest-v2-2.md +++ b/docs/spec/manifest-v2-2.md @@ -98,9 +98,6 @@ image manifest based on the Content-Type returned in the HTTP response. The optional os.version field specifies the operating system version, for example `10.0.10586`. - Windows versions are ordered so that the first compatible image is - at the top of the list. - - **`os.features`** *array* The optional os.features field specifies an array of strings, From c9eba1a5bb737c9b2b5d793f6e5ceda9b976d9b1 Mon Sep 17 00:00:00 2001 From: Gladkov Alexey Date: Tue, 14 Nov 2017 15:16:02 +0100 Subject: [PATCH 067/276] Fix the pointer initialization If the overwriteStruct() finds an uninitialized pointer, it tries to initialize it, but does it incorrectly. It tries to assign a pointer to pointer, instead of pointer. Signed-off-by: Gladkov Alexey --- configuration/parser.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/configuration/parser.go b/configuration/parser.go index b46f7326..3e9d587f 100644 --- a/configuration/parser.go +++ b/configuration/parser.go @@ -220,7 +220,7 @@ func (p *Parser) overwriteStruct(v reflect.Value, fullpath string, path []string } case reflect.Ptr: if field.IsNil() { - field.Set(reflect.New(sf.Type)) + field.Set(reflect.New(field.Type().Elem())) } } From e69837454adaffbcadfb11bb795db496a0cbb4ad Mon Sep 17 00:00:00 2001 From: Gladkov Alexey Date: Wed, 15 Nov 2017 16:37:43 +0100 Subject: [PATCH 068/276] Add tests for configuration parser Signed-off-by: Gladkov Alexey --- configuration/parser_test.go | 70 ++++++++++++++++++++++++++++++++++++ 1 file changed, 70 insertions(+) create mode 100644 configuration/parser_test.go diff --git a/configuration/parser_test.go b/configuration/parser_test.go new file mode 100644 index 00000000..344b2ee4 --- /dev/null +++ b/configuration/parser_test.go @@ -0,0 +1,70 @@ +package configuration + +import ( + "os" + "reflect" + + . 
"gopkg.in/check.v1" +) + +type localConfiguration struct { + Version Version `yaml:"version"` + Log *Log `yaml:"log"` +} + +type Log struct { + Formatter string `yaml:"formatter,omitempty"` +} + +var expectedConfig = localConfiguration{ + Version: "0.1", + Log: &Log{ + Formatter: "json", + }, +} + +type ParserSuite struct{} + +var _ = Suite(new(ParserSuite)) + +func (suite *ParserSuite) TestParserOverwriteIninitializedPoiner(c *C) { + config := localConfiguration{} + + os.Setenv("REGISTRY_LOG_FORMATTER", "json") + defer os.Unsetenv("REGISTRY_LOG_FORMATTER") + + p := NewParser("registry", []VersionedParseInfo{ + { + Version: "0.1", + ParseAs: reflect.TypeOf(config), + ConversionFunc: func(c interface{}) (interface{}, error) { + return c, nil + }, + }, + }) + + err := p.Parse([]byte(`{version: "0.1", log: {formatter: "text"}}`), &config) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, expectedConfig) +} + +func (suite *ParserSuite) TestParseOverwriteUnininitializedPoiner(c *C) { + config := localConfiguration{} + + os.Setenv("REGISTRY_LOG_FORMATTER", "json") + defer os.Unsetenv("REGISTRY_LOG_FORMATTER") + + p := NewParser("registry", []VersionedParseInfo{ + { + Version: "0.1", + ParseAs: reflect.TypeOf(config), + ConversionFunc: func(c interface{}) (interface{}, error) { + return c, nil + }, + }, + }) + + err := p.Parse([]byte(`{version: "0.1"}`), &config) + c.Assert(err, IsNil) + c.Assert(config, DeepEquals, expectedConfig) +} From 1bfbeca7269121e55d2b7a66853563aa25973790 Mon Sep 17 00:00:00 2001 From: Kevin Lin Date: Sat, 18 Nov 2017 21:50:22 -0800 Subject: [PATCH 069/276] Properly follow relative links when listing tags The previous code assumed that the link returned when listing tags was always absolute. However, some registries, such as quay.io, return the link as a relative link (e.g. the second page for the quay.io/coreos/etcd image is /v2/coreos/etcd/tags/list?next_page=&n=50). Because the relative link was retrieved directly, the fetch failed (with the error `unsupported protocol scheme ""`). Signed-off-by: Kevin Lin --- registry/client/repository.go | 17 ++++++++++++++--- registry/client/repository_test.go | 18 ++++++++++++++++-- 2 files changed, 30 insertions(+), 5 deletions(-) diff --git a/registry/client/repository.go b/registry/client/repository.go index ed29cf3f..d8e2c795 100644 --- a/registry/client/repository.go +++ b/registry/client/repository.go @@ -205,13 +205,18 @@ type tags struct { func (t *tags) All(ctx context.Context) ([]string, error) { var tags []string - u, err := t.ub.BuildTagsURL(t.name) + listURLStr, err := t.ub.BuildTagsURL(t.name) + if err != nil { + return tags, err + } + + listURL, err := url.Parse(listURLStr) if err != nil { return tags, err } for { - resp, err := t.client.Get(u) + resp, err := t.client.Get(listURL.String()) if err != nil { return tags, err } @@ -231,7 +236,13 @@ func (t *tags) All(ctx context.Context) ([]string, error) { } tags = append(tags, tagsResponse.Tags...) 
if link := resp.Header.Get("Link"); link != "" { - u = strings.Trim(strings.Split(link, ";")[0], "<>") + linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>") + linkURL, err := url.Parse(linkURLStr) + if err != nil { + return tags, err + } + + listURL = listURL.ResolveReference(linkURL) } else { return tags, nil } diff --git a/registry/client/repository_test.go b/registry/client/repository_test.go index 3bcf63e1..4018a928 100644 --- a/registry/client/repository_test.go +++ b/registry/client/repository_test.go @@ -1136,13 +1136,27 @@ func TestManifestTagsPaginated(t *testing.T) { queryParams["n"] = []string{"1"} queryParams["last"] = []string{tagsList[i-1]} } + + // Test both relative and absolute links. + relativeLink := "/v2/" + repo.Name() + "/tags/list?n=1&last=" + tagsList[i] + var link string + switch i { + case 0: + link = relativeLink + case len(tagsList) - 1: + link = "" + default: + link = s.URL + relativeLink + } + headers := http.Header(map[string][]string{ "Content-Length": {fmt.Sprint(len(body))}, "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, }) - if i < 2 { - headers.Set("Link", "<"+s.URL+"/v2/"+repo.Name()+"/tags/list?n=1&last="+tagsList[i]+`>; rel="next"`) + if link != "" { + headers.Set("Link", fmt.Sprintf(`<%s>; rel="next"`, link)) } + m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "GET", From 118c8ee1f189e2c694b12a2725c656182db27856 Mon Sep 17 00:00:00 2001 From: Monika Katiyar Date: Wed, 16 Aug 2017 15:59:41 +0530 Subject: [PATCH 070/276] Added nginx configuration for uploading large sized blobs (layer size) to registry when using nginx as reverse proxy from monikakatiyar16 Signed-off-by: Monika Katiyar --- contrib/compose/nginx/docker-registry-v2.conf | 3 +++ 1 file changed, 3 insertions(+) diff --git a/contrib/compose/nginx/docker-registry-v2.conf b/contrib/compose/nginx/docker-registry-v2.conf index 65c4d776..df698c79 100644 --- a/contrib/compose/nginx/docker-registry-v2.conf +++ b/contrib/compose/nginx/docker-registry-v2.conf @@ -4,3 +4,6 @@ proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Proto $scheme; proxy_read_timeout 900; +proxy_send_timeout 300; +proxy_request_buffering off; (see issue #2292 - https://github.com/moby/moby/issues/2292) +proxy_http_version 1.1; From e8ecc6dc5506b2046800221ee66583ae692640eb Mon Sep 17 00:00:00 2001 From: tifayuki Date: Tue, 14 Nov 2017 17:21:36 -0800 Subject: [PATCH 071/276] add s3 region filters for cloudfront Signed-off-by: tifayuki --- .gitignore | 1 + context/logger.go | 2 + docs/configuration.md | 16 + .../middleware/cloudfront/middleware.go | 79 ++- .../driver/middleware/cloudfront/s3filter.go | 223 ++++++ .../middleware/cloudfront/s3filter_test.go | 401 +++++++++++ vendor/github.com/miekg/dns/msg_generate.go | 340 --------- vendor/github.com/miekg/dns/types_generate.go | 271 ------- vendor/golang.org/x/net/idna/idna.go | 68 -- vendor/golang.org/x/net/idna/punycode.go | 200 ------ vendor/golang.org/x/net/publicsuffix/gen.go | 663 ------------------ 11 files changed, 716 insertions(+), 1548 deletions(-) create mode 100644 registry/storage/driver/middleware/cloudfront/s3filter.go create mode 100644 registry/storage/driver/middleware/cloudfront/s3filter_test.go delete mode 100644 vendor/github.com/miekg/dns/msg_generate.go delete mode 100644 vendor/github.com/miekg/dns/types_generate.go delete mode 100644 vendor/golang.org/x/net/idna/idna.go delete 
mode 100644 vendor/golang.org/x/net/idna/punycode.go delete mode 100644 vendor/golang.org/x/net/publicsuffix/gen.go diff --git a/.gitignore b/.gitignore index 1c3ae0a7..4cf7888e 100644 --- a/.gitignore +++ b/.gitignore @@ -35,3 +35,4 @@ bin/* # Editor/IDE specific files. *.sublime-project *.sublime-workspace +.idea/* diff --git a/context/logger.go b/context/logger.go index afc8860b..3e5b81bb 100644 --- a/context/logger.go +++ b/context/logger.go @@ -39,6 +39,8 @@ type Logger interface { Warn(args ...interface{}) Warnf(format string, args ...interface{}) Warnln(args ...interface{}) + + WithError(err error) *logrus.Entry } type loggerKey struct{} diff --git a/docs/configuration.md b/docs/configuration.md index c7f9023f..807353cc 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -183,6 +183,10 @@ middleware: privatekey: /path/to/pem keypairid: cloudfrontkeypairid duration: 3000s + ipfilteredby: awsregion + awsregion: us-east-1, us-east-2 + updatefrenquency: 12h + iprangesurl: https://ip-ranges.amazonaws.com/ip-ranges.json storage: - name: redirect options: @@ -636,6 +640,10 @@ middleware: privatekey: /path/to/pem keypairid: cloudfrontkeypairid duration: 3000s + ipfilteredby: awsregion + awsregion: us-east-1, us-east-2 + updatefrenquency: 12h + iprangesurl: https://ip-ranges.amazonaws.com/ip-ranges.json ``` Each middleware entry has `name` and `options` entries. The `name` must @@ -655,6 +663,14 @@ interpretation of the options. | `privatekey` | yes | The private key for Cloudfront, provided by AWS. | | `keypairid` | yes | The key pair ID provided by AWS. | | `duration` | no | An integer and unit for the duration of the Cloudfront session. Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, or `h`. For example, `3000s` is valid, but `3000 s` is not. If you do not specify a `duration` or you specify an integer without a time unit, the duration defaults to `20m` (20 minutes).| +|`ipfilteredby`|no | A string with the following value `none|aws|awsregion`. | +|`awsregion`|no | A comma separated string of AWS regions, only available when `ipfilteredby` is `awsregion`. For example, `us-east-1, us-west-2`| +|`updatefrenquency`|no | The frequency to update AWS IP regions, default: `12h`| +|`iprangesurl`|no | The URL containing the AWS IP ranges information, default: `https://ip-ranges.amazonaws.com/ip-ranges.json`| +The value of `ipfilteredby` can be: +`none`: default, do not filter by IP +`aws`: IP from AWS goes to S3 directly +`awsregion`: IP from certain AWS regions goes to S3 directly, use together with `awsregion` ### `redirect` diff --git a/registry/storage/driver/middleware/cloudfront/middleware.go b/registry/storage/driver/middleware/cloudfront/middleware.go index 61e787a4..5dc3ee41 100644 --- a/registry/storage/driver/middleware/cloudfront/middleware.go +++ b/registry/storage/driver/middleware/cloudfront/middleware.go @@ -16,7 +16,7 @@ import ( "github.com/aws/aws-sdk-go/service/cloudfront/sign" dcontext "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" - storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" + "github.com/docker/distribution/registry/storage/driver/middleware" ) // cloudFrontStorageMiddleware provides a simple implementation of layerHandler that @@ -24,6 +24,7 @@ import ( // then issues HTTP Temporary Redirects to this CloudFront content URL.
 type cloudFrontStorageMiddleware struct {
 	storagedriver.StorageDriver
+	awsIPs    *awsIPs
 	urlSigner *sign.URLSigner
 	baseURL   string
 	duration  time.Duration
@@ -34,7 +35,13 @@ var _ storagedriver.StorageDriver = &cloudFrontStorageMiddleware{}

 // newCloudFrontLayerHandler constructs and returns a new CloudFront
 // LayerHandler implementation.
 // Required options: baseurl, privatekey, keypairid
+// Optional options: ipfilteredby, awsregion
+// ipfilteredby: valid values are "none", "aws" and "awsregion". With "none"
+// (the default) no requests are filtered; with "aws" requests from any AWS IP
+// go to S3 directly; with "awsregion" only requests from the regions listed
+// in the awsregion option go to S3 directly.
+// awsregion: a comma separated string of AWS regions.
 func newCloudFrontStorageMiddleware(storageDriver storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) {
+	// parse baseurl
 	base, ok := options["baseurl"]
 	if !ok {
 		return nil, fmt.Errorf("no baseurl provided")
@@ -52,6 +59,8 @@ func newCloudFrontStorageMiddleware(storageDriver storagedriver.StorageDriver, o
 	if _, err := url.Parse(baseURL); err != nil {
 		return nil, fmt.Errorf("invalid baseurl: %v", err)
 	}
+
+	// parse privatekey to get pkPath
 	pk, ok := options["privatekey"]
 	if !ok {
 		return nil, fmt.Errorf("no privatekey provided")
@@ -60,6 +69,8 @@ func newCloudFrontStorageMiddleware(storageDriver storagedriver.StorageDriver, o
 	if !ok {
 		return nil, fmt.Errorf("privatekey must be a string")
 	}
+
+	// parse keypairid
 	kpid, ok := options["keypairid"]
 	if !ok {
 		return nil, fmt.Errorf("no keypairid provided")
@@ -69,6 +80,7 @@ func newCloudFrontStorageMiddleware(storageDriver storagedriver.StorageDriver, o
 		return nil, fmt.Errorf("keypairid must be a string")
 	}

+	// get urlSigner from the file specified in pkPath
 	pkBytes, err := ioutil.ReadFile(pkPath)
 	if err != nil {
 		return nil, fmt.Errorf("failed to read privatekey file: %s", err)
@@ -82,12 +94,11 @@ func newCloudFrontStorageMiddleware(storageDriver storagedriver.StorageDriver, o
 	if err != nil {
 		return nil, err
 	}
-
 	urlSigner := sign.NewURLSigner(keypairID, privateKey)

+	// parse duration
 	duration := 20 * time.Minute
-	d, ok := options["duration"]
-	if ok {
+	if d, ok := options["duration"]; ok {
 		switch d := d.(type) {
 		case time.Duration:
 			duration = d
@@ -100,11 +111,62 @@ func newCloudFrontStorageMiddleware(storageDriver storagedriver.StorageDriver, o
 		}
 	}

+	// parse updatefrenquency
+	updateFrequency := defaultUpdateFrequency
+	if u, ok := options["updatefrenquency"]; ok {
+		switch u := u.(type) {
+		case time.Duration:
+			updateFrequency = u
+		case string:
+			updateFreq, err := time.ParseDuration(u)
+			if err != nil {
+				return nil, fmt.Errorf("invalid updatefrenquency: %s", err)
+			}
+			updateFrequency = updateFreq
+		}
+	}
+
+	// parse iprangesurl
+	ipRangesURL := defaultIPRangesURL
+	if i, ok := options["iprangesurl"]; ok {
+		if iprangeurl, ok := i.(string); ok {
+			ipRangesURL = iprangeurl
+		} else {
+			return nil, fmt.Errorf("iprangesurl must be a string")
+		}
+	}
+
+	// parse ipfilteredby
+	var awsIPs *awsIPs
+	if ipFilteredBy, ok := options["ipfilteredby"].(string); ok {
+		switch strings.ToLower(strings.TrimSpace(ipFilteredBy)) {
+		case "", "none":
+			awsIPs = nil
+		case "aws":
+			awsIPs = newAWSIPs(ipRangesURL, updateFrequency, nil)
+		case "awsregion":
+			var awsRegion []string
+			if regions, ok := options["awsregion"].(string); ok {
+				for _, awsRegions := range strings.Split(regions, ",") {
+					awsRegion = append(awsRegion, strings.ToLower(strings.TrimSpace(awsRegions)))
+				}
+				awsIPs = newAWSIPs(ipRangesURL, updateFrequency,
awsRegion)
+			} else {
+				return nil, fmt.Errorf("awsregion must be a comma separated string of valid aws regions")
+			}
+		default:
+			return nil, fmt.Errorf("ipfilteredby only allows one of the following values: none|aws|awsregion")
+		}
+	} else if _, ok := options["ipfilteredby"]; ok {
+		// the option is present but is not a string
+		return nil, fmt.Errorf("ipfilteredby only allows one of the following values: none|aws|awsregion")
+	}
+
 	return &cloudFrontStorageMiddleware{
 		StorageDriver: storageDriver,
 		urlSigner:     urlSigner,
 		baseURL:       baseURL,
 		duration:      duration,
+		awsIPs:        awsIPs,
 	}, nil
 }

@@ -114,8 +176,8 @@ type S3BucketKeyer interface {
 	S3BucketKey(path string) string
 }

-// Resolve returns an http.Handler which can serve the contents of the given
-// Layer, or an error if not supported by the storagedriver.
+// URLFor attempts to find a url which may be used to retrieve the file at the given path.
+// Returns an error if the file cannot be found.
 func (lh *cloudFrontStorageMiddleware) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
 	// TODO(endophage): currently only supports S3
 	keyer, ok := lh.StorageDriver.(S3BucketKeyer)
@@ -124,6 +186,11 @@
 		return lh.StorageDriver.URLFor(ctx, path, options)
 	}

+	if eligibleForS3(ctx, lh.awsIPs) {
+		return lh.StorageDriver.URLFor(ctx, path, options)
+	}
+
+	// Get signed cloudfront url.
 	cfURL, err := lh.urlSigner.Sign(lh.baseURL+keyer.S3BucketKey(path), time.Now().Add(lh.duration))
 	if err != nil {
 		return "", err
diff --git a/registry/storage/driver/middleware/cloudfront/s3filter.go b/registry/storage/driver/middleware/cloudfront/s3filter.go
new file mode 100644
index 00000000..c8c7f570
--- /dev/null
+++ b/registry/storage/driver/middleware/cloudfront/s3filter.go
@@ -0,0 +1,223 @@
+package middleware
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"strings"
+	"sync"
+	"time"
+
+	dcontext "github.com/docker/distribution/context"
+)
+
+const (
+	// defaultIPRangesURL is the URL of the document that defines the AWS IP ranges
+	defaultIPRangesURL = "https://ip-ranges.amazonaws.com/ip-ranges.json"
+	// defaultUpdateFrequency is how often the AWS IP ranges are refreshed
+	defaultUpdateFrequency = time.Hour * 12
+)
+
+// newAWSIPs returns a new awsIPs object.
+// If awsRegion is `nil`, it accepts any region; otherwise, it only allows
+// the regions specified.
+func newAWSIPs(host string, updateFrequency time.Duration, awsRegion []string) *awsIPs {
+	ips := &awsIPs{
+		host:            host,
+		updateFrequency: updateFrequency,
+		awsRegion:       awsRegion,
+		updaterStopChan: make(chan bool),
+	}
+	if err := ips.tryUpdate(); err != nil {
+		dcontext.GetLogger(context.Background()).WithError(err).Warn("failed to update AWS IP ranges")
+	}
+	go ips.updater()
+	return ips
+}
+
+// awsIPs tracks a list of AWS ips, filtered by awsRegion
+type awsIPs struct {
+	host            string
+	updateFrequency time.Duration
+	ipv4            []net.IPNet
+	ipv6            []net.IPNet
+	mutex           sync.RWMutex
+	awsRegion       []string
+	updaterStopChan chan bool
+	initialized     bool
+}
+
+type awsIPResponse struct {
+	Prefixes   []prefixEntry `json:"prefixes"`
+	V6Prefixes []prefixEntry `json:"ipv6_prefixes"`
+}
+
+type prefixEntry struct {
+	IPV4Prefix string `json:"ip_prefix"`
+	IPV6Prefix string `json:"ipv6_prefix"`
+	Region     string `json:"region"`
+	Service    string `json:"service"`
+}
+
+func fetchAWSIPs(url string) (awsIPResponse, error) {
+	var response awsIPResponse
+	resp, err := http.Get(url)
+	if err != nil {
+		return response, err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != 200 {
+		body, _ := ioutil.ReadAll(resp.Body)
+		return response, fmt.Errorf("failed to fetch network data. response = %s", body)
+	}
+	decoder := json.NewDecoder(resp.Body)
+	err = decoder.Decode(&response)
+	if err != nil {
+		return response, err
+	}
+	return response, nil
+}
+
+// tryUpdate attempts to download the new set of ip addresses.
+// tryUpdate must be thread safe with contains
+func (s *awsIPs) tryUpdate() error {
+	response, err := fetchAWSIPs(s.host)
+	if err != nil {
+		return err
+	}
+
+	var ipv4 []net.IPNet
+	var ipv6 []net.IPNet
+
+	processAddress := func(output *[]net.IPNet, prefix string, region string) {
+		regionAllowed := false
+		if len(s.awsRegion) > 0 {
+			for _, ar := range s.awsRegion {
+				if strings.ToLower(region) == ar {
+					regionAllowed = true
+					break
+				}
+			}
+		} else {
+			regionAllowed = true
+		}
+
+		_, network, err := net.ParseCIDR(prefix)
+		if err != nil {
+			dcontext.GetLoggerWithFields(dcontext.Background(), map[interface{}]interface{}{
+				"cidr": prefix,
+			}).Error("unparseable cidr")
+			return
+		}
+		if regionAllowed {
+			*output = append(*output, *network)
+		}
+	}
+
+	for _, prefix := range response.Prefixes {
+		processAddress(&ipv4, prefix.IPV4Prefix, prefix.Region)
+	}
+	for _, prefix := range response.V6Prefixes {
+		processAddress(&ipv6, prefix.IPV6Prefix, prefix.Region)
+	}
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	// Update each attr of awsIPs atomically.
+	s.ipv4 = ipv4
+	s.ipv6 = ipv6
+	s.initialized = true
+	return nil
+}
+
+// updater is meant to be run in a background goroutine.
+// It periodically refreshes the ips from aws.
+func (s *awsIPs) updater() {
+	defer close(s.updaterStopChan)
+	for {
+		time.Sleep(s.updateFrequency)
+		select {
+		case <-s.updaterStopChan:
+			dcontext.GetLogger(context.Background()).Info("aws ip updater received stop signal")
+			return
+		default:
+			err := s.tryUpdate()
+			if err != nil {
+				dcontext.GetLogger(context.Background()).WithError(err).Error("failed to update AWS IP ranges")
+			}
+		}
+	}
+}
+
+// getCandidateNetworks returns either the ipv4 or ipv6 networks
+// that were last read from aws. The networks returned
+// have the same type as the ip address provided.
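+// A 4-byte address (or one where To4() is non-nil, e.g. a 4-in-6 mapped
+// address) selects the ipv4 set; any other valid address selects the ipv6 set.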
+func (s *awsIPs) getCandidateNetworks(ip net.IP) []net.IPNet {
+	s.mutex.RLock()
+	defer s.mutex.RUnlock()
+	if ip.To4() != nil {
+		return s.ipv4
+	} else if ip.To16() != nil {
+		return s.ipv6
+	} else {
+		dcontext.GetLoggerWithFields(dcontext.Background(), map[interface{}]interface{}{
+			"ip": ip,
+		}).Error("unknown ip address format")
+		// assume mismatch, pass through cloudfront
+		return nil
+	}
+}
+
+// contains determines whether the given ip is within aws.
+func (s *awsIPs) contains(ip net.IP) bool {
+	networks := s.getCandidateNetworks(ip)
+	for _, network := range networks {
+		if network.Contains(ip) {
+			return true
+		}
+	}
+	return false
+}
+
+// parseIPFromRequest attempts to extract the ip address of the
+// client that made the request
+func parseIPFromRequest(ctx context.Context) (net.IP, error) {
+	request, err := dcontext.GetRequest(ctx)
+	if err != nil {
+		return nil, err
+	}
+	ipStr := dcontext.RemoteIP(request)
+	ip := net.ParseIP(ipStr)
+	if ip == nil {
+		return nil, fmt.Errorf("invalid ip address from requester: %s", ipStr)
+	}
+
+	return ip, nil
+}
+
+// eligibleForS3 checks if a request is eligible for using S3 directly.
+// It returns true only when the client IP belongs to the configured AWS IP
+// ranges (optionally restricted to specific regions).
+func eligibleForS3(ctx context.Context, awsIPs *awsIPs) bool {
+	if awsIPs != nil && awsIPs.initialized {
+		if addr, err := parseIPFromRequest(ctx); err == nil {
+			request, err := dcontext.GetRequest(ctx)
+			if err != nil {
+				dcontext.GetLogger(ctx).Warnf("the CloudFront middleware cannot parse the request: %s", err)
+			} else {
+				loggerField := map[interface{}]interface{}{
+					"user-agent": request.UserAgent(),
+					"ip":         dcontext.RemoteIP(request),
+				}
+				if awsIPs.contains(addr) {
+					dcontext.GetLoggerWithFields(ctx, loggerField).Info("request from the allowed AWS region, skipping CloudFront")
+					return true
+				}
+				dcontext.GetLoggerWithFields(ctx, loggerField).Warn("request not from the allowed AWS region, fallback to CloudFront")
+			}
+		} else {
+			dcontext.GetLogger(ctx).WithError(err).Warn("failed to parse ip address from context, fallback to CloudFront")
+		}
+	}
+	return false
+}
diff --git a/registry/storage/driver/middleware/cloudfront/s3filter_test.go b/registry/storage/driver/middleware/cloudfront/s3filter_test.go
new file mode 100644
index 00000000..6aca3abc
--- /dev/null
+++ b/registry/storage/driver/middleware/cloudfront/s3filter_test.go
@@ -0,0 +1,401 @@
+package middleware
+
+import (
+	"context"
+	"crypto/rand"
+	"encoding/json"
+	"fmt"
+	"net"
+	"net/http"
+	"net/http/httptest"
+	"reflect"
+	"testing"
+	"time"
+
+	dcontext "github.com/docker/distribution/context"
+)
+
+// assertEqual is a small reflect.DeepEqual-based helper, used rather than
+// pulling in all of testify.
+func assertEqual(t *testing.T, x, y interface{}) {
+	if !reflect.DeepEqual(x, y) {
+		t.Errorf("%s: Not equal! Expected='%v', Actual='%v'\n", t.Name(), x, y)
+		t.FailNow()
+	}
+}
+
+type mockIPRangeHandler struct {
+	data awsIPResponse
+}
+
+func (m mockIPRangeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	bytes, err := json.Marshal(m.data)
+	if err != nil {
+		w.WriteHeader(500)
+		return
+	}
+	w.Write(bytes)
+}
+
+func newTestHandler(data awsIPResponse) *httptest.Server {
+	return httptest.NewServer(mockIPRangeHandler{
+		data: data,
+	})
+}
+
+func serverIPRanges(server *httptest.Server) string {
+	return fmt.Sprintf("%s/", server.URL)
+}
+
+func setupTest(data awsIPResponse) *httptest.Server {
+	// This serves a basic ip-ranges document which only claims that the
+	// exact ips in `data` are in aws.
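+	// Tests point newAWSIPs at this server's URL (via serverIPRanges)
+	// in place of the real https://ip-ranges.amazonaws.com endpoint.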
+ server := newTestHandler(data) + return server +} + +func TestS3TryUpdate(t *testing.T) { + t.Parallel() + server := setupTest(awsIPResponse{ + Prefixes: []prefixEntry{ + {IPV4Prefix: "123.231.123.231/32"}, + }, + }) + defer server.Close() + + ips := newAWSIPs(serverIPRanges(server), time.Hour, nil) + + assertEqual(t, 1, len(ips.ipv4)) + assertEqual(t, 0, len(ips.ipv6)) + +} + +func TestMatchIPV6(t *testing.T) { + t.Parallel() + server := setupTest(awsIPResponse{ + V6Prefixes: []prefixEntry{ + {IPV6Prefix: "ff00::/16"}, + }, + }) + defer server.Close() + + ips := newAWSIPs(serverIPRanges(server), time.Hour, nil) + ips.tryUpdate() + assertEqual(t, true, ips.contains(net.ParseIP("ff00::"))) + assertEqual(t, 1, len(ips.ipv6)) + assertEqual(t, 0, len(ips.ipv4)) +} + +func TestMatchIPV4(t *testing.T) { + t.Parallel() + server := setupTest(awsIPResponse{ + Prefixes: []prefixEntry{ + {IPV4Prefix: "192.168.0.0/24"}, + }, + }) + defer server.Close() + + ips := newAWSIPs(serverIPRanges(server), time.Hour, nil) + ips.tryUpdate() + assertEqual(t, true, ips.contains(net.ParseIP("192.168.0.0"))) + assertEqual(t, true, ips.contains(net.ParseIP("192.168.0.1"))) + assertEqual(t, false, ips.contains(net.ParseIP("192.169.0.0"))) +} + +func TestMatchIPV4_2(t *testing.T) { + t.Parallel() + server := setupTest(awsIPResponse{ + Prefixes: []prefixEntry{ + { + IPV4Prefix: "192.168.0.0/24", + Region: "us-east-1", + }, + }, + }) + defer server.Close() + + ips := newAWSIPs(serverIPRanges(server), time.Hour, nil) + ips.tryUpdate() + assertEqual(t, true, ips.contains(net.ParseIP("192.168.0.0"))) + assertEqual(t, true, ips.contains(net.ParseIP("192.168.0.1"))) + assertEqual(t, false, ips.contains(net.ParseIP("192.169.0.0"))) +} + +func TestMatchIPV4WithRegionMatched(t *testing.T) { + t.Parallel() + server := setupTest(awsIPResponse{ + Prefixes: []prefixEntry{ + { + IPV4Prefix: "192.168.0.0/24", + Region: "us-east-1", + }, + }, + }) + defer server.Close() + + ips := newAWSIPs(serverIPRanges(server), time.Hour, []string{"us-east-1"}) + ips.tryUpdate() + assertEqual(t, true, ips.contains(net.ParseIP("192.168.0.0"))) + assertEqual(t, true, ips.contains(net.ParseIP("192.168.0.1"))) + assertEqual(t, false, ips.contains(net.ParseIP("192.169.0.0"))) +} + +func TestMatchIPV4WithRegionMatch_2(t *testing.T) { + t.Parallel() + server := setupTest(awsIPResponse{ + Prefixes: []prefixEntry{ + { + IPV4Prefix: "192.168.0.0/24", + Region: "us-east-1", + }, + }, + }) + defer server.Close() + + ips := newAWSIPs(serverIPRanges(server), time.Hour, []string{"us-west-2", "us-east-1"}) + ips.tryUpdate() + assertEqual(t, true, ips.contains(net.ParseIP("192.168.0.0"))) + assertEqual(t, true, ips.contains(net.ParseIP("192.168.0.1"))) + assertEqual(t, false, ips.contains(net.ParseIP("192.169.0.0"))) +} + +func TestMatchIPV4WithRegionNotMatched(t *testing.T) { + t.Parallel() + server := setupTest(awsIPResponse{ + Prefixes: []prefixEntry{ + { + IPV4Prefix: "192.168.0.0/24", + Region: "us-east-1", + }, + }, + }) + defer server.Close() + + ips := newAWSIPs(serverIPRanges(server), time.Hour, []string{"us-west-2"}) + ips.tryUpdate() + assertEqual(t, false, ips.contains(net.ParseIP("192.168.0.0"))) + assertEqual(t, false, ips.contains(net.ParseIP("192.168.0.1"))) + assertEqual(t, false, ips.contains(net.ParseIP("192.169.0.0"))) +} + +func TestInvalidData(t *testing.T) { + t.Parallel() + // Invalid entries from aws should be ignored. 
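+	// For example, the bare "9000" below fails net.ParseCIDR, so only the
+	// well-formed "192.168.0.0/24" entry should survive the update.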
+ server := setupTest(awsIPResponse{ + Prefixes: []prefixEntry{ + {IPV4Prefix: "9000"}, + {IPV4Prefix: "192.168.0.0/24"}, + }, + }) + defer server.Close() + + ips := newAWSIPs(serverIPRanges(server), time.Hour, nil) + ips.tryUpdate() + assertEqual(t, 1, len(ips.ipv4)) +} + +func TestInvalidNetworkType(t *testing.T) { + t.Parallel() + server := setupTest(awsIPResponse{ + Prefixes: []prefixEntry{ + {IPV4Prefix: "192.168.0.0/24"}, + }, + V6Prefixes: []prefixEntry{ + {IPV6Prefix: "ff00::/8"}, + {IPV6Prefix: "fe00::/8"}, + }, + }) + defer server.Close() + + ips := newAWSIPs(serverIPRanges(server), time.Hour, nil) + assertEqual(t, 0, len(ips.getCandidateNetworks(make([]byte, 17)))) // 17 bytes does not correspond to any net type + assertEqual(t, 1, len(ips.getCandidateNetworks(make([]byte, 4)))) // netv4 networks + assertEqual(t, 2, len(ips.getCandidateNetworks(make([]byte, 16)))) // netv6 networks +} + +func TestParsing(t *testing.T) { + var data = `{ + "prefixes": [{ + "ip_prefix": "192.168.0.0", + "region": "someregion", + "service": "s3"}], + "ipv6_prefixes": [{ + "ipv6_prefix": "2001:4860:4860::8888", + "region": "anotherregion", + "service": "ec2"}] + }` + rawMockHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Write([]byte(data)) }) + t.Parallel() + server := httptest.NewServer(rawMockHandler) + defer server.Close() + schema, err := fetchAWSIPs(server.URL) + + assertEqual(t, nil, err) + assertEqual(t, 1, len(schema.Prefixes)) + assertEqual(t, prefixEntry{ + IPV4Prefix: "192.168.0.0", + Region: "someregion", + Service: "s3", + }, schema.Prefixes[0]) + assertEqual(t, 1, len(schema.V6Prefixes)) + assertEqual(t, prefixEntry{ + IPV6Prefix: "2001:4860:4860::8888", + Region: "anotherregion", + Service: "ec2", + }, schema.V6Prefixes[0]) +} + +func TestUpdateCalledRegularly(t *testing.T) { + t.Parallel() + + updateCount := 0 + server := httptest.NewServer(http.HandlerFunc( + func(rw http.ResponseWriter, req *http.Request) { + updateCount++ + rw.Write([]byte("ok")) + })) + defer server.Close() + newAWSIPs(fmt.Sprintf("%s/", server.URL), time.Second, nil) + time.Sleep(time.Second*4 + time.Millisecond*500) + if updateCount < 4 { + t.Errorf("Update should have been called at least 4 times, actual=%d", updateCount) + } +} + +func TestEligibleForS3(t *testing.T) { + awsIPs := &awsIPs{ + ipv4: []net.IPNet{{ + IP: net.ParseIP("192.168.1.1"), + Mask: net.IPv4Mask(255, 255, 255, 0), + }}, + initialized: true, + } + empty := context.TODO() + makeContext := func(ip string) context.Context { + req := &http.Request{ + RemoteAddr: ip, + } + + return dcontext.WithRequest(empty, req) + } + + cases := []struct { + Context context.Context + Expected bool + }{ + {Context: empty, Expected: false}, + {Context: makeContext("192.168.1.2"), Expected: true}, + {Context: makeContext("192.168.0.2"), Expected: false}, + } + + for _, testCase := range cases { + name := fmt.Sprintf("Client IP = %v", + testCase.Context.Value("http.request.ip")) + t.Run(name, func(t *testing.T) { + assertEqual(t, testCase.Expected, eligibleForS3(testCase.Context, awsIPs)) + }) + } +} + +func TestEligibleForS3WithAWSIPNotInitialized(t *testing.T) { + awsIPs := &awsIPs{ + ipv4: []net.IPNet{{ + IP: net.ParseIP("192.168.1.1"), + Mask: net.IPv4Mask(255, 255, 255, 0), + }}, + initialized: false, + } + empty := context.TODO() + makeContext := func(ip string) context.Context { + req := &http.Request{ + RemoteAddr: ip, + } + + return dcontext.WithRequest(empty, req) + } + + cases := []struct { + Context context.Context + 
Expected bool + }{ + {Context: empty, Expected: false}, + {Context: makeContext("192.168.1.2"), Expected: false}, + {Context: makeContext("192.168.0.2"), Expected: false}, + } + + for _, testCase := range cases { + name := fmt.Sprintf("Client IP = %v", + testCase.Context.Value("http.request.ip")) + t.Run(name, func(t *testing.T) { + assertEqual(t, testCase.Expected, eligibleForS3(testCase.Context, awsIPs)) + }) + } +} + +// populate ips with a number of different ipv4 and ipv6 networks, for the purposes +// of benchmarking contains() performance. +func populateRandomNetworks(b *testing.B, ips *awsIPs, ipv4Count, ipv6Count int) { + generateNetworks := func(dest *[]net.IPNet, bytes int, count int) { + for i := 0; i < count; i++ { + ip := make([]byte, bytes) + _, err := rand.Read(ip) + if err != nil { + b.Fatalf("failed to generate network for test : %s", err.Error()) + } + mask := make([]byte, bytes) + for i := 0; i < bytes; i++ { + mask[i] = 0xff + } + *dest = append(*dest, net.IPNet{ + IP: ip, + Mask: mask, + }) + } + } + + generateNetworks(&ips.ipv4, 4, ipv4Count) + generateNetworks(&ips.ipv6, 16, ipv6Count) +} + +func BenchmarkContainsRandom(b *testing.B) { + // Generate a random network configuration, of size comparable to + // aws official networks list + // curl -s https://ip-ranges.amazonaws.com/ip-ranges.json | jq '.prefixes | length' + // 941 + numNetworksPerType := 1000 // keep in sync with the above + // intentionally skip constructor when creating awsIPs, to avoid updater routine. + // This benchmark is only concerned with contains() performance. + awsIPs := awsIPs{} + populateRandomNetworks(b, &awsIPs, numNetworksPerType, numNetworksPerType) + + ipv4 := make([][]byte, b.N) + ipv6 := make([][]byte, b.N) + for i := 0; i < b.N; i++ { + ipv4[i] = make([]byte, 4) + ipv6[i] = make([]byte, 16) + rand.Read(ipv4[i]) + rand.Read(ipv6[i]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + awsIPs.contains(ipv4[i]) + awsIPs.contains(ipv6[i]) + } +} + +func BenchmarkContainsProd(b *testing.B) { + awsIPs := newAWSIPs(defaultIPRangesURL, defaultUpdateFrequency, nil) + ipv4 := make([][]byte, b.N) + ipv6 := make([][]byte, b.N) + for i := 0; i < b.N; i++ { + ipv4[i] = make([]byte, 4) + ipv6[i] = make([]byte, 16) + rand.Read(ipv4[i]) + rand.Read(ipv6[i]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + awsIPs.contains(ipv4[i]) + awsIPs.contains(ipv6[i]) + } +} diff --git a/vendor/github.com/miekg/dns/msg_generate.go b/vendor/github.com/miekg/dns/msg_generate.go deleted file mode 100644 index 35786f22..00000000 --- a/vendor/github.com/miekg/dns/msg_generate.go +++ /dev/null @@ -1,340 +0,0 @@ -//+build ignore - -// msg_generate.go is meant to run with go generate. It will use -// go/{importer,types} to track down all the RR struct types. Then for each type -// it will generate pack/unpack methods based on the struct tags. The generated source is -// written to zmsg.go, and is meant to be checked into git. -package main - -import ( - "bytes" - "fmt" - "go/format" - "go/importer" - "go/types" - "log" - "os" - "strings" -) - -var packageHdr = ` -// *** DO NOT MODIFY *** -// AUTOGENERATED BY go generate from msg_generate.go - -package dns - -` - -// getTypeStruct will take a type and the package scope, and return the -// (innermost) struct if the type is considered a RR type (currently defined as -// those structs beginning with a RR_Header, could be redefined as implementing -// the RR interface). The bool return value indicates if embedded structs were -// resolved. 
-func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) { - st, ok := t.Underlying().(*types.Struct) - if !ok { - return nil, false - } - if st.Field(0).Type() == scope.Lookup("RR_Header").Type() { - return st, false - } - if st.Field(0).Anonymous() { - st, _ := getTypeStruct(st.Field(0).Type(), scope) - return st, true - } - return nil, false -} - -func main() { - // Import and type-check the package - pkg, err := importer.Default().Import("github.com/miekg/dns") - fatalIfErr(err) - scope := pkg.Scope() - - // Collect actual types (*X) - var namedTypes []string - for _, name := range scope.Names() { - o := scope.Lookup(name) - if o == nil || !o.Exported() { - continue - } - if st, _ := getTypeStruct(o.Type(), scope); st == nil { - continue - } - if name == "PrivateRR" { - continue - } - - // Check if corresponding TypeX exists - if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" { - log.Fatalf("Constant Type%s does not exist.", o.Name()) - } - - namedTypes = append(namedTypes, o.Name()) - } - - b := &bytes.Buffer{} - b.WriteString(packageHdr) - - fmt.Fprint(b, "// pack*() functions\n\n") - for _, name := range namedTypes { - o := scope.Lookup(name) - st, _ := getTypeStruct(o.Type(), scope) - - fmt.Fprintf(b, "func (rr *%s) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {\n", name) - fmt.Fprint(b, `off, err := rr.Hdr.pack(msg, off, compression, compress) -if err != nil { - return off, err -} -headerEnd := off -`) - for i := 1; i < st.NumFields(); i++ { - o := func(s string) { - fmt.Fprintf(b, s, st.Field(i).Name()) - fmt.Fprint(b, `if err != nil { -return off, err -} -`) - } - - if _, ok := st.Field(i).Type().(*types.Slice); ok { - switch st.Tag(i) { - case `dns:"-"`: // ignored - case `dns:"txt"`: - o("off, err = packStringTxt(rr.%s, msg, off)\n") - case `dns:"opt"`: - o("off, err = packDataOpt(rr.%s, msg, off)\n") - case `dns:"nsec"`: - o("off, err = packDataNsec(rr.%s, msg, off)\n") - case `dns:"domain-name"`: - o("off, err = packDataDomainNames(rr.%s, msg, off, compression, compress)\n") - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - continue - } - - switch { - case st.Tag(i) == `dns:"-"`: // ignored - case st.Tag(i) == `dns:"cdomain-name"`: - fallthrough - case st.Tag(i) == `dns:"domain-name"`: - o("off, err = PackDomainName(rr.%s, msg, off, compression, compress)\n") - case st.Tag(i) == `dns:"a"`: - o("off, err = packDataA(rr.%s, msg, off)\n") - case st.Tag(i) == `dns:"aaaa"`: - o("off, err = packDataAAAA(rr.%s, msg, off)\n") - case st.Tag(i) == `dns:"uint48"`: - o("off, err = packUint48(rr.%s, msg, off)\n") - case st.Tag(i) == `dns:"txt"`: - o("off, err = packString(rr.%s, msg, off)\n") - - case strings.HasPrefix(st.Tag(i), `dns:"size-base32`): // size-base32 can be packed just like base32 - fallthrough - case st.Tag(i) == `dns:"base32"`: - o("off, err = packStringBase32(rr.%s, msg, off)\n") - - case strings.HasPrefix(st.Tag(i), `dns:"size-base64`): // size-base64 can be packed just like base64 - fallthrough - case st.Tag(i) == `dns:"base64"`: - o("off, err = packStringBase64(rr.%s, msg, off)\n") - - case strings.HasPrefix(st.Tag(i), `dns:"size-hex:SaltLength`): // Hack to fix empty salt length for NSEC3 - o("if rr.%s == \"-\" { /* do nothing, empty salt */ }\n") - continue - case strings.HasPrefix(st.Tag(i), `dns:"size-hex`): // size-hex can be packed just like hex - fallthrough - case st.Tag(i) == `dns:"hex"`: - o("off, err = packStringHex(rr.%s, msg, off)\n") - - case st.Tag(i) == 
`dns:"octet"`: - o("off, err = packStringOctet(rr.%s, msg, off)\n") - case st.Tag(i) == "": - switch st.Field(i).Type().(*types.Basic).Kind() { - case types.Uint8: - o("off, err = packUint8(rr.%s, msg, off)\n") - case types.Uint16: - o("off, err = packUint16(rr.%s, msg, off)\n") - case types.Uint32: - o("off, err = packUint32(rr.%s, msg, off)\n") - case types.Uint64: - o("off, err = packUint64(rr.%s, msg, off)\n") - case types.String: - o("off, err = packString(rr.%s, msg, off)\n") - default: - log.Fatalln(name, st.Field(i).Name()) - } - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - } - // We have packed everything, only now we know the rdlength of this RR - fmt.Fprintln(b, "rr.Header().Rdlength = uint16(off-headerEnd)") - fmt.Fprintln(b, "return off, nil }\n") - } - - fmt.Fprint(b, "// unpack*() functions\n\n") - for _, name := range namedTypes { - o := scope.Lookup(name) - st, _ := getTypeStruct(o.Type(), scope) - - fmt.Fprintf(b, "func unpack%s(h RR_Header, msg []byte, off int) (RR, int, error) {\n", name) - fmt.Fprintf(b, "rr := new(%s)\n", name) - fmt.Fprint(b, "rr.Hdr = h\n") - fmt.Fprint(b, `if noRdata(h) { -return rr, off, nil - } -var err error -rdStart := off -_ = rdStart - -`) - for i := 1; i < st.NumFields(); i++ { - o := func(s string) { - fmt.Fprintf(b, s, st.Field(i).Name()) - fmt.Fprint(b, `if err != nil { -return rr, off, err -} -`) - } - - // size-* are special, because they reference a struct member we should use for the length. - if strings.HasPrefix(st.Tag(i), `dns:"size-`) { - structMember := structMember(st.Tag(i)) - structTag := structTag(st.Tag(i)) - switch structTag { - case "hex": - fmt.Fprintf(b, "rr.%s, off, err = unpackStringHex(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) - case "base32": - fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase32(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) - case "base64": - fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase64(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - fmt.Fprint(b, `if err != nil { -return rr, off, err -} -`) - continue - } - - if _, ok := st.Field(i).Type().(*types.Slice); ok { - switch st.Tag(i) { - case `dns:"-"`: // ignored - case `dns:"txt"`: - o("rr.%s, off, err = unpackStringTxt(msg, off)\n") - case `dns:"opt"`: - o("rr.%s, off, err = unpackDataOpt(msg, off)\n") - case `dns:"nsec"`: - o("rr.%s, off, err = unpackDataNsec(msg, off)\n") - case `dns:"domain-name"`: - o("rr.%s, off, err = unpackDataDomainNames(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - continue - } - - switch st.Tag(i) { - case `dns:"-"`: // ignored - case `dns:"cdomain-name"`: - fallthrough - case `dns:"domain-name"`: - o("rr.%s, off, err = UnpackDomainName(msg, off)\n") - case `dns:"a"`: - o("rr.%s, off, err = unpackDataA(msg, off)\n") - case `dns:"aaaa"`: - o("rr.%s, off, err = unpackDataAAAA(msg, off)\n") - case `dns:"uint48"`: - o("rr.%s, off, err = unpackUint48(msg, off)\n") - case `dns:"txt"`: - o("rr.%s, off, err = unpackString(msg, off)\n") - case `dns:"base32"`: - o("rr.%s, off, err = unpackStringBase32(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") - case `dns:"base64"`: - o("rr.%s, off, err = unpackStringBase64(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") - case `dns:"hex"`: - o("rr.%s, off, err = unpackStringHex(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") - case `dns:"octet"`: - o("rr.%s, off, err = 
unpackStringOctet(msg, off)\n") - case "": - switch st.Field(i).Type().(*types.Basic).Kind() { - case types.Uint8: - o("rr.%s, off, err = unpackUint8(msg, off)\n") - case types.Uint16: - o("rr.%s, off, err = unpackUint16(msg, off)\n") - case types.Uint32: - o("rr.%s, off, err = unpackUint32(msg, off)\n") - case types.Uint64: - o("rr.%s, off, err = unpackUint64(msg, off)\n") - case types.String: - o("rr.%s, off, err = unpackString(msg, off)\n") - default: - log.Fatalln(name, st.Field(i).Name()) - } - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - // If we've hit len(msg) we return without error. - if i < st.NumFields()-1 { - fmt.Fprintf(b, `if off == len(msg) { -return rr, off, nil - } -`) - } - } - fmt.Fprintf(b, "return rr, off, err }\n\n") - } - // Generate typeToUnpack map - fmt.Fprintln(b, "var typeToUnpack = map[uint16]func(RR_Header, []byte, int) (RR, int, error){") - for _, name := range namedTypes { - if name == "RFC3597" { - continue - } - fmt.Fprintf(b, "Type%s: unpack%s,\n", name, name) - } - fmt.Fprintln(b, "}\n") - - // gofmt - res, err := format.Source(b.Bytes()) - if err != nil { - b.WriteTo(os.Stderr) - log.Fatal(err) - } - - // write result - f, err := os.Create("zmsg.go") - fatalIfErr(err) - defer f.Close() - f.Write(res) -} - -// structMember will take a tag like dns:"size-base32:SaltLength" and return the last part of this string. -func structMember(s string) string { - fields := strings.Split(s, ":") - if len(fields) == 0 { - return "" - } - f := fields[len(fields)-1] - // f should have a closing " - if len(f) > 1 { - return f[:len(f)-1] - } - return f -} - -// structTag will take a tag like dns:"size-base32:SaltLength" and return base32. -func structTag(s string) string { - fields := strings.Split(s, ":") - if len(fields) < 2 { - return "" - } - return fields[1][len("\"size-"):] -} - -func fatalIfErr(err error) { - if err != nil { - log.Fatal(err) - } -} diff --git a/vendor/github.com/miekg/dns/types_generate.go b/vendor/github.com/miekg/dns/types_generate.go deleted file mode 100644 index bf80da32..00000000 --- a/vendor/github.com/miekg/dns/types_generate.go +++ /dev/null @@ -1,271 +0,0 @@ -//+build ignore - -// types_generate.go is meant to run with go generate. It will use -// go/{importer,types} to track down all the RR struct types. Then for each type -// it will generate conversion tables (TypeToRR and TypeToString) and banal -// methods (len, Header, copy) based on the struct tags. The generated source is -// written to ztypes.go, and is meant to be checked into git. -package main - -import ( - "bytes" - "fmt" - "go/format" - "go/importer" - "go/types" - "log" - "os" - "strings" - "text/template" -) - -var skipLen = map[string]struct{}{ - "NSEC": {}, - "NSEC3": {}, - "OPT": {}, -} - -var packageHdr = ` -// *** DO NOT MODIFY *** -// AUTOGENERATED BY go generate from type_generate.go - -package dns - -import ( - "encoding/base64" - "net" -) - -` - -var TypeToRR = template.Must(template.New("TypeToRR").Parse(` -// TypeToRR is a map of constructors for each RR type. -var TypeToRR = map[uint16]func() RR{ -{{range .}}{{if ne . "RFC3597"}} Type{{.}}: func() RR { return new({{.}}) }, -{{end}}{{end}} } - -`)) - -var typeToString = template.Must(template.New("typeToString").Parse(` -// TypeToString is a map of strings for each RR type. -var TypeToString = map[uint16]string{ -{{range .}}{{if ne . 
"NSAPPTR"}} Type{{.}}: "{{.}}", -{{end}}{{end}} TypeNSAPPTR: "NSAP-PTR", -} - -`)) - -var headerFunc = template.Must(template.New("headerFunc").Parse(` -// Header() functions -{{range .}} func (rr *{{.}}) Header() *RR_Header { return &rr.Hdr } -{{end}} - -`)) - -// getTypeStruct will take a type and the package scope, and return the -// (innermost) struct if the type is considered a RR type (currently defined as -// those structs beginning with a RR_Header, could be redefined as implementing -// the RR interface). The bool return value indicates if embedded structs were -// resolved. -func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) { - st, ok := t.Underlying().(*types.Struct) - if !ok { - return nil, false - } - if st.Field(0).Type() == scope.Lookup("RR_Header").Type() { - return st, false - } - if st.Field(0).Anonymous() { - st, _ := getTypeStruct(st.Field(0).Type(), scope) - return st, true - } - return nil, false -} - -func main() { - // Import and type-check the package - pkg, err := importer.Default().Import("github.com/miekg/dns") - fatalIfErr(err) - scope := pkg.Scope() - - // Collect constants like TypeX - var numberedTypes []string - for _, name := range scope.Names() { - o := scope.Lookup(name) - if o == nil || !o.Exported() { - continue - } - b, ok := o.Type().(*types.Basic) - if !ok || b.Kind() != types.Uint16 { - continue - } - if !strings.HasPrefix(o.Name(), "Type") { - continue - } - name := strings.TrimPrefix(o.Name(), "Type") - if name == "PrivateRR" { - continue - } - numberedTypes = append(numberedTypes, name) - } - - // Collect actual types (*X) - var namedTypes []string - for _, name := range scope.Names() { - o := scope.Lookup(name) - if o == nil || !o.Exported() { - continue - } - if st, _ := getTypeStruct(o.Type(), scope); st == nil { - continue - } - if name == "PrivateRR" { - continue - } - - // Check if corresponding TypeX exists - if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" { - log.Fatalf("Constant Type%s does not exist.", o.Name()) - } - - namedTypes = append(namedTypes, o.Name()) - } - - b := &bytes.Buffer{} - b.WriteString(packageHdr) - - // Generate TypeToRR - fatalIfErr(TypeToRR.Execute(b, namedTypes)) - - // Generate typeToString - fatalIfErr(typeToString.Execute(b, numberedTypes)) - - // Generate headerFunc - fatalIfErr(headerFunc.Execute(b, namedTypes)) - - // Generate len() - fmt.Fprint(b, "// len() functions\n") - for _, name := range namedTypes { - if _, ok := skipLen[name]; ok { - continue - } - o := scope.Lookup(name) - st, isEmbedded := getTypeStruct(o.Type(), scope) - if isEmbedded { - continue - } - fmt.Fprintf(b, "func (rr *%s) len() int {\n", name) - fmt.Fprintf(b, "l := rr.Hdr.len()\n") - for i := 1; i < st.NumFields(); i++ { - o := func(s string) { fmt.Fprintf(b, s, st.Field(i).Name()) } - - if _, ok := st.Field(i).Type().(*types.Slice); ok { - switch st.Tag(i) { - case `dns:"-"`: - // ignored - case `dns:"cdomain-name"`, `dns:"domain-name"`, `dns:"txt"`: - o("for _, x := range rr.%s { l += len(x) + 1 }\n") - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - continue - } - - switch { - case st.Tag(i) == `dns:"-"`: - // ignored - case st.Tag(i) == `dns:"cdomain-name"`, st.Tag(i) == `dns:"domain-name"`: - o("l += len(rr.%s) + 1\n") - case st.Tag(i) == `dns:"octet"`: - o("l += len(rr.%s)\n") - case strings.HasPrefix(st.Tag(i), `dns:"size-base64`): - fallthrough - case st.Tag(i) == `dns:"base64"`: - o("l += base64.StdEncoding.DecodedLen(len(rr.%s))\n") - case 
strings.HasPrefix(st.Tag(i), `dns:"size-hex`): - fallthrough - case st.Tag(i) == `dns:"hex"`: - o("l += len(rr.%s)/2 + 1\n") - case st.Tag(i) == `dns:"a"`: - o("l += net.IPv4len // %s\n") - case st.Tag(i) == `dns:"aaaa"`: - o("l += net.IPv6len // %s\n") - case st.Tag(i) == `dns:"txt"`: - o("for _, t := range rr.%s { l += len(t) + 1 }\n") - case st.Tag(i) == `dns:"uint48"`: - o("l += 6 // %s\n") - case st.Tag(i) == "": - switch st.Field(i).Type().(*types.Basic).Kind() { - case types.Uint8: - o("l += 1 // %s\n") - case types.Uint16: - o("l += 2 // %s\n") - case types.Uint32: - o("l += 4 // %s\n") - case types.Uint64: - o("l += 8 // %s\n") - case types.String: - o("l += len(rr.%s) + 1\n") - default: - log.Fatalln(name, st.Field(i).Name()) - } - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - } - fmt.Fprintf(b, "return l }\n") - } - - // Generate copy() - fmt.Fprint(b, "// copy() functions\n") - for _, name := range namedTypes { - o := scope.Lookup(name) - st, isEmbedded := getTypeStruct(o.Type(), scope) - if isEmbedded { - continue - } - fmt.Fprintf(b, "func (rr *%s) copy() RR {\n", name) - fields := []string{"*rr.Hdr.copyHeader()"} - for i := 1; i < st.NumFields(); i++ { - f := st.Field(i).Name() - if sl, ok := st.Field(i).Type().(*types.Slice); ok { - t := sl.Underlying().String() - t = strings.TrimPrefix(t, "[]") - if strings.Contains(t, ".") { - splits := strings.Split(t, ".") - t = splits[len(splits)-1] - } - fmt.Fprintf(b, "%s := make([]%s, len(rr.%s)); copy(%s, rr.%s)\n", - f, t, f, f, f) - fields = append(fields, f) - continue - } - if st.Field(i).Type().String() == "net.IP" { - fields = append(fields, "copyIP(rr."+f+")") - continue - } - fields = append(fields, "rr."+f) - } - fmt.Fprintf(b, "return &%s{%s}\n", name, strings.Join(fields, ",")) - fmt.Fprintf(b, "}\n") - } - - // gofmt - res, err := format.Source(b.Bytes()) - if err != nil { - b.WriteTo(os.Stderr) - log.Fatal(err) - } - - // write result - f, err := os.Create("ztypes.go") - fatalIfErr(err) - defer f.Close() - f.Write(res) -} - -func fatalIfErr(err error) { - if err != nil { - log.Fatal(err) - } -} diff --git a/vendor/golang.org/x/net/idna/idna.go b/vendor/golang.org/x/net/idna/idna.go deleted file mode 100644 index 3daa8979..00000000 --- a/vendor/golang.org/x/net/idna/idna.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package idna implements IDNA2008 (Internationalized Domain Names for -// Applications), defined in RFC 5890, RFC 5891, RFC 5892, RFC 5893 and -// RFC 5894. -package idna // import "golang.org/x/net/idna" - -import ( - "strings" - "unicode/utf8" -) - -// TODO(nigeltao): specify when errors occur. For example, is ToASCII(".") or -// ToASCII("foo\x00") an error? See also http://www.unicode.org/faq/idn.html#11 - -// acePrefix is the ASCII Compatible Encoding prefix. -const acePrefix = "xn--" - -// ToASCII converts a domain or domain label to its ASCII form. For example, -// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and -// ToASCII("golang") is "golang". 
-func ToASCII(s string) (string, error) { - if ascii(s) { - return s, nil - } - labels := strings.Split(s, ".") - for i, label := range labels { - if !ascii(label) { - a, err := encode(acePrefix, label) - if err != nil { - return "", err - } - labels[i] = a - } - } - return strings.Join(labels, "."), nil -} - -// ToUnicode converts a domain or domain label to its Unicode form. For example, -// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and -// ToUnicode("golang") is "golang". -func ToUnicode(s string) (string, error) { - if !strings.Contains(s, acePrefix) { - return s, nil - } - labels := strings.Split(s, ".") - for i, label := range labels { - if strings.HasPrefix(label, acePrefix) { - u, err := decode(label[len(acePrefix):]) - if err != nil { - return "", err - } - labels[i] = u - } - } - return strings.Join(labels, "."), nil -} - -func ascii(s string) bool { - for i := 0; i < len(s); i++ { - if s[i] >= utf8.RuneSelf { - return false - } - } - return true -} diff --git a/vendor/golang.org/x/net/idna/punycode.go b/vendor/golang.org/x/net/idna/punycode.go deleted file mode 100644 index 92e733f6..00000000 --- a/vendor/golang.org/x/net/idna/punycode.go +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package idna - -// This file implements the Punycode algorithm from RFC 3492. - -import ( - "fmt" - "math" - "strings" - "unicode/utf8" -) - -// These parameter values are specified in section 5. -// -// All computation is done with int32s, so that overflow behavior is identical -// regardless of whether int is 32-bit or 64-bit. -const ( - base int32 = 36 - damp int32 = 700 - initialBias int32 = 72 - initialN int32 = 128 - skew int32 = 38 - tmax int32 = 26 - tmin int32 = 1 -) - -// decode decodes a string as specified in section 6.2. -func decode(encoded string) (string, error) { - if encoded == "" { - return "", nil - } - pos := 1 + strings.LastIndex(encoded, "-") - if pos == 1 { - return "", fmt.Errorf("idna: invalid label %q", encoded) - } - if pos == len(encoded) { - return encoded[:len(encoded)-1], nil - } - output := make([]rune, 0, len(encoded)) - if pos != 0 { - for _, r := range encoded[:pos-1] { - output = append(output, r) - } - } - i, n, bias := int32(0), initialN, initialBias - for pos < len(encoded) { - oldI, w := i, int32(1) - for k := base; ; k += base { - if pos == len(encoded) { - return "", fmt.Errorf("idna: invalid label %q", encoded) - } - digit, ok := decodeDigit(encoded[pos]) - if !ok { - return "", fmt.Errorf("idna: invalid label %q", encoded) - } - pos++ - i += digit * w - if i < 0 { - return "", fmt.Errorf("idna: invalid label %q", encoded) - } - t := k - bias - if t < tmin { - t = tmin - } else if t > tmax { - t = tmax - } - if digit < t { - break - } - w *= base - t - if w >= math.MaxInt32/base { - return "", fmt.Errorf("idna: invalid label %q", encoded) - } - } - x := int32(len(output) + 1) - bias = adapt(i-oldI, x, oldI == 0) - n += i / x - i %= x - if n > utf8.MaxRune || len(output) >= 1024 { - return "", fmt.Errorf("idna: invalid label %q", encoded) - } - output = append(output, 0) - copy(output[i+1:], output[i:]) - output[i] = n - i++ - } - return string(output), nil -} - -// encode encodes a string as specified in section 6.3 and prepends prefix to -// the result. 
-// -// The "while h < length(input)" line in the specification becomes "for -// remaining != 0" in the Go code, because len(s) in Go is in bytes, not runes. -func encode(prefix, s string) (string, error) { - output := make([]byte, len(prefix), len(prefix)+1+2*len(s)) - copy(output, prefix) - delta, n, bias := int32(0), initialN, initialBias - b, remaining := int32(0), int32(0) - for _, r := range s { - if r < 0x80 { - b++ - output = append(output, byte(r)) - } else { - remaining++ - } - } - h := b - if b > 0 { - output = append(output, '-') - } - for remaining != 0 { - m := int32(0x7fffffff) - for _, r := range s { - if m > r && r >= n { - m = r - } - } - delta += (m - n) * (h + 1) - if delta < 0 { - return "", fmt.Errorf("idna: invalid label %q", s) - } - n = m - for _, r := range s { - if r < n { - delta++ - if delta < 0 { - return "", fmt.Errorf("idna: invalid label %q", s) - } - continue - } - if r > n { - continue - } - q := delta - for k := base; ; k += base { - t := k - bias - if t < tmin { - t = tmin - } else if t > tmax { - t = tmax - } - if q < t { - break - } - output = append(output, encodeDigit(t+(q-t)%(base-t))) - q = (q - t) / (base - t) - } - output = append(output, encodeDigit(q)) - bias = adapt(delta, h+1, h == b) - delta = 0 - h++ - remaining-- - } - delta++ - n++ - } - return string(output), nil -} - -func decodeDigit(x byte) (digit int32, ok bool) { - switch { - case '0' <= x && x <= '9': - return int32(x - ('0' - 26)), true - case 'A' <= x && x <= 'Z': - return int32(x - 'A'), true - case 'a' <= x && x <= 'z': - return int32(x - 'a'), true - } - return 0, false -} - -func encodeDigit(digit int32) byte { - switch { - case 0 <= digit && digit < 26: - return byte(digit + 'a') - case 26 <= digit && digit < 36: - return byte(digit + ('0' - 26)) - } - panic("idna: internal error in punycode encoding") -} - -// adapt is the bias adaptation function specified in section 6.1. -func adapt(delta, numPoints int32, firstTime bool) int32 { - if firstTime { - delta /= damp - } else { - delta /= 2 - } - delta += delta / numPoints - k := int32(0) - for delta > ((base-tmin)*tmax)/2 { - delta /= base - tmin - k += base - } - return k + (base-tmin+1)*delta/(delta+skew) -} diff --git a/vendor/golang.org/x/net/publicsuffix/gen.go b/vendor/golang.org/x/net/publicsuffix/gen.go deleted file mode 100644 index 5c8d7b5f..00000000 --- a/vendor/golang.org/x/net/publicsuffix/gen.go +++ /dev/null @@ -1,663 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// This program generates table.go and table_test.go. -// Invoke as: -// -// go run gen.go -version "xxx" >table.go -// go run gen.go -version "xxx" -test >table_test.go -// -// Pass -v to print verbose progress information. -// -// The version is derived from information found at -// https://github.com/publicsuffix/list/commits/master/public_suffix_list.dat -// -// To fetch a particular git revision, such as 5c70ccd250, pass -// -url "https://raw.githubusercontent.com/publicsuffix/list/5c70ccd250/public_suffix_list.dat" - -import ( - "bufio" - "bytes" - "flag" - "fmt" - "go/format" - "io" - "net/http" - "os" - "regexp" - "sort" - "strings" - - "golang.org/x/net/idna" -) - -const ( - // These sum of these four values must be no greater than 32. 
- nodesBitsChildren = 9 - nodesBitsICANN = 1 - nodesBitsTextOffset = 15 - nodesBitsTextLength = 6 - - // These sum of these four values must be no greater than 32. - childrenBitsWildcard = 1 - childrenBitsNodeType = 2 - childrenBitsHi = 14 - childrenBitsLo = 14 -) - -var ( - maxChildren int - maxTextOffset int - maxTextLength int - maxHi uint32 - maxLo uint32 -) - -func max(a, b int) int { - if a < b { - return b - } - return a -} - -func u32max(a, b uint32) uint32 { - if a < b { - return b - } - return a -} - -const ( - nodeTypeNormal = 0 - nodeTypeException = 1 - nodeTypeParentOnly = 2 - numNodeType = 3 -) - -func nodeTypeStr(n int) string { - switch n { - case nodeTypeNormal: - return "+" - case nodeTypeException: - return "!" - case nodeTypeParentOnly: - return "o" - } - panic("unreachable") -} - -var ( - labelEncoding = map[string]uint32{} - labelsList = []string{} - labelsMap = map[string]bool{} - rules = []string{} - - // validSuffix is used to check that the entries in the public suffix list - // are in canonical form (after Punycode encoding). Specifically, capital - // letters are not allowed. - validSuffix = regexp.MustCompile(`^[a-z0-9_\!\*\-\.]+$`) - - subset = flag.Bool("subset", false, "generate only a subset of the full table, for debugging") - url = flag.String("url", - "https://publicsuffix.org/list/effective_tld_names.dat", - "URL of the publicsuffix.org list. If empty, stdin is read instead") - v = flag.Bool("v", false, "verbose output (to stderr)") - version = flag.String("version", "", "the effective_tld_names.dat version") - test = flag.Bool("test", false, "generate table_test.go") -) - -func main() { - if err := main1(); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } -} - -func main1() error { - flag.Parse() - if nodesBitsTextLength+nodesBitsTextOffset+nodesBitsICANN+nodesBitsChildren > 32 { - return fmt.Errorf("not enough bits to encode the nodes table") - } - if childrenBitsLo+childrenBitsHi+childrenBitsNodeType+childrenBitsWildcard > 32 { - return fmt.Errorf("not enough bits to encode the children table") - } - if *version == "" { - return fmt.Errorf("-version was not specified") - } - var r io.Reader = os.Stdin - if *url != "" { - res, err := http.Get(*url) - if err != nil { - return err - } - if res.StatusCode != http.StatusOK { - return fmt.Errorf("bad GET status for %s: %d", *url, res.Status) - } - r = res.Body - defer res.Body.Close() - } - - var root node - icann := false - buf := new(bytes.Buffer) - br := bufio.NewReader(r) - for { - s, err := br.ReadString('\n') - if err != nil { - if err == io.EOF { - break - } - return err - } - s = strings.TrimSpace(s) - if strings.Contains(s, "BEGIN ICANN DOMAINS") { - icann = true - continue - } - if strings.Contains(s, "END ICANN DOMAINS") { - icann = false - continue - } - if s == "" || strings.HasPrefix(s, "//") { - continue - } - s, err = idna.ToASCII(s) - if err != nil { - return err - } - if !validSuffix.MatchString(s) { - return fmt.Errorf("bad publicsuffix.org list data: %q", s) - } - - if *subset { - switch { - case s == "ac.jp" || strings.HasSuffix(s, ".ac.jp"): - case s == "ak.us" || strings.HasSuffix(s, ".ak.us"): - case s == "ao" || strings.HasSuffix(s, ".ao"): - case s == "ar" || strings.HasSuffix(s, ".ar"): - case s == "arpa" || strings.HasSuffix(s, ".arpa"): - case s == "cy" || strings.HasSuffix(s, ".cy"): - case s == "dyndns.org" || strings.HasSuffix(s, ".dyndns.org"): - case s == "jp": - case s == "kobe.jp" || strings.HasSuffix(s, ".kobe.jp"): - case s == "kyoto.jp" || 
strings.HasSuffix(s, ".kyoto.jp"): - case s == "om" || strings.HasSuffix(s, ".om"): - case s == "uk" || strings.HasSuffix(s, ".uk"): - case s == "uk.com" || strings.HasSuffix(s, ".uk.com"): - case s == "tw" || strings.HasSuffix(s, ".tw"): - case s == "zw" || strings.HasSuffix(s, ".zw"): - case s == "xn--p1ai" || strings.HasSuffix(s, ".xn--p1ai"): - // xn--p1ai is Russian-Cyrillic "рф". - default: - continue - } - } - - rules = append(rules, s) - - nt, wildcard := nodeTypeNormal, false - switch { - case strings.HasPrefix(s, "*."): - s, nt = s[2:], nodeTypeParentOnly - wildcard = true - case strings.HasPrefix(s, "!"): - s, nt = s[1:], nodeTypeException - } - labels := strings.Split(s, ".") - for n, i := &root, len(labels)-1; i >= 0; i-- { - label := labels[i] - n = n.child(label) - if i == 0 { - if nt != nodeTypeParentOnly && n.nodeType == nodeTypeParentOnly { - n.nodeType = nt - } - n.icann = n.icann && icann - n.wildcard = n.wildcard || wildcard - } - labelsMap[label] = true - } - } - labelsList = make([]string, 0, len(labelsMap)) - for label := range labelsMap { - labelsList = append(labelsList, label) - } - sort.Strings(labelsList) - - p := printReal - if *test { - p = printTest - } - if err := p(buf, &root); err != nil { - return err - } - - b, err := format.Source(buf.Bytes()) - if err != nil { - return err - } - _, err = os.Stdout.Write(b) - return err -} - -func printTest(w io.Writer, n *node) error { - fmt.Fprintf(w, "// generated by go run gen.go; DO NOT EDIT\n\n") - fmt.Fprintf(w, "package publicsuffix\n\nvar rules = [...]string{\n") - for _, rule := range rules { - fmt.Fprintf(w, "%q,\n", rule) - } - fmt.Fprintf(w, "}\n\nvar nodeLabels = [...]string{\n") - if err := n.walk(w, printNodeLabel); err != nil { - return err - } - fmt.Fprintf(w, "}\n") - return nil -} - -func printReal(w io.Writer, n *node) error { - const header = `// generated by go run gen.go; DO NOT EDIT - -package publicsuffix - -const version = %q - -const ( - nodesBitsChildren = %d - nodesBitsICANN = %d - nodesBitsTextOffset = %d - nodesBitsTextLength = %d - - childrenBitsWildcard = %d - childrenBitsNodeType = %d - childrenBitsHi = %d - childrenBitsLo = %d -) - -const ( - nodeTypeNormal = %d - nodeTypeException = %d - nodeTypeParentOnly = %d -) - -// numTLD is the number of top level domains. -const numTLD = %d - -` - fmt.Fprintf(w, header, *version, - nodesBitsChildren, nodesBitsICANN, nodesBitsTextOffset, nodesBitsTextLength, - childrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo, - nodeTypeNormal, nodeTypeException, nodeTypeParentOnly, len(n.children)) - - text := combineText(labelsList) - if text == "" { - return fmt.Errorf("internal error: makeText returned no text") - } - for _, label := range labelsList { - offset, length := strings.Index(text, label), len(label) - if offset < 0 { - return fmt.Errorf("internal error: could not find %q in text %q", label, text) - } - maxTextOffset, maxTextLength = max(maxTextOffset, offset), max(maxTextLength, length) - if offset >= 1<= 1< 64 { - n, plus = 64, " +" - } - fmt.Fprintf(w, "%q%s\n", text[:n], plus) - text = text[n:] - } - - if err := n.walk(w, assignIndexes); err != nil { - return err - } - - fmt.Fprintf(w, ` - -// nodes is the list of nodes. Each node is represented as a uint32, which -// encodes the node's children, wildcard bit and node type (as an index into -// the children array), ICANN bit and text. 
-// -// In the //-comment after each node's data, the nodes indexes of the children -// are formatted as (n0x1234-n0x1256), with * denoting the wildcard bit. The -// nodeType is printed as + for normal, ! for exception, and o for parent-only -// nodes that have children but don't match a domain label in their own right. -// An I denotes an ICANN domain. -// -// The layout within the uint32, from MSB to LSB, is: -// [%2d bits] unused -// [%2d bits] children index -// [%2d bits] ICANN bit -// [%2d bits] text index -// [%2d bits] text length -var nodes = [...]uint32{ -`, - 32-nodesBitsChildren-nodesBitsICANN-nodesBitsTextOffset-nodesBitsTextLength, - nodesBitsChildren, nodesBitsICANN, nodesBitsTextOffset, nodesBitsTextLength) - if err := n.walk(w, printNode); err != nil { - return err - } - fmt.Fprintf(w, `} - -// children is the list of nodes' children, the parent's wildcard bit and the -// parent's node type. If a node has no children then their children index -// will be in the range [0, 6), depending on the wildcard bit and node type. -// -// The layout within the uint32, from MSB to LSB, is: -// [%2d bits] unused -// [%2d bits] wildcard bit -// [%2d bits] node type -// [%2d bits] high nodes index (exclusive) of children -// [%2d bits] low nodes index (inclusive) of children -var children=[...]uint32{ -`, - 32-childrenBitsWildcard-childrenBitsNodeType-childrenBitsHi-childrenBitsLo, - childrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo) - for i, c := range childrenEncoding { - s := "---------------" - lo := c & (1<> childrenBitsLo) & (1<>(childrenBitsLo+childrenBitsHi)) & (1<>(childrenBitsLo+childrenBitsHi+childrenBitsNodeType) != 0 - fmt.Fprintf(w, "0x%08x, // c0x%04x (%s)%s %s\n", - c, i, s, wildcardStr(wildcard), nodeTypeStr(nodeType)) - } - fmt.Fprintf(w, "}\n\n") - fmt.Fprintf(w, "// max children %d (capacity %d)\n", maxChildren, 1<= 1<= 1<= 1< 0 && ss[0] == "" { - ss = ss[1:] - } - return ss -} - -// crush combines a list of strings, taking advantage of overlaps. It returns a -// single string that contains each input string as a substring. -func crush(ss []string) string { - maxLabelLen := 0 - for _, s := range ss { - if maxLabelLen < len(s) { - maxLabelLen = len(s) - } - } - - for prefixLen := maxLabelLen; prefixLen > 0; prefixLen-- { - prefixes := makePrefixMap(ss, prefixLen) - for i, s := range ss { - if len(s) <= prefixLen { - continue - } - mergeLabel(ss, i, prefixLen, prefixes) - } - } - - return strings.Join(ss, "") -} - -// mergeLabel merges the label at ss[i] with the first available matching label -// in prefixMap, where the last "prefixLen" characters in ss[i] match the first -// "prefixLen" characters in the matching label. -// It will merge ss[i] repeatedly until no more matches are available. -// All matching labels merged into ss[i] are replaced by "". -func mergeLabel(ss []string, i, prefixLen int, prefixes prefixMap) { - s := ss[i] - suffix := s[len(s)-prefixLen:] - for _, j := range prefixes[suffix] { - // Empty strings mean "already used." Also avoid merging with self. - if ss[j] == "" || i == j { - continue - } - if *v { - fmt.Fprintf(os.Stderr, "%d-length overlap at (%4d,%4d): %q and %q share %q\n", - prefixLen, i, j, ss[i], ss[j], suffix) - } - ss[i] += ss[j][prefixLen:] - ss[j] = "" - // ss[i] has a new suffix, so merge again if possible. - // Note: we only have to merge again at the same prefix length. Shorter - // prefix lengths will be handled in the next iteration of crush's for loop. 
-		// Can there be matches for longer prefix lengths, introduced by the merge?
-		// I believe that any such matches would by necessity have been eliminated
-		// during substring removal or merged at a higher prefix length. For
-		// instance, in crush("abc", "cde", "bcdef"), combining "abc" and "cde"
-		// would yield "abcde", which could be merged with "bcdef." However, in
-		// practice "cde" would already have been eliminated by removeSubstrings.
-		mergeLabel(ss, i, prefixLen, prefixes)
-		return
-	}
-}
-
-// prefixMap maps from a prefix to a list of strings containing that prefix. The
-// list of strings is represented as indexes into a slice of strings stored
-// elsewhere.
-type prefixMap map[string][]int
-
-// makePrefixMap constructs a prefixMap from a slice of strings.
-func makePrefixMap(ss []string, prefixLen int) prefixMap {
-	prefixes := make(prefixMap)
-	for i, s := range ss {
-		// We use < rather than <= because if a label matches on a prefix equal to
-		// its full length, that's actually a substring match handled by
-		// removeSubstrings.
-		if prefixLen < len(s) {
-			prefix := s[:prefixLen]
-			prefixes[prefix] = append(prefixes[prefix], i)
-		}
-	}
-
-	return prefixes
-}

From 8777e97b72eb30eb8609bafa86e6612f7b9635c0 Mon Sep 17 00:00:00 2001
From: Raphaël Enrici
Date: Sun, 17 Dec 2017 18:26:09 +0100
Subject: [PATCH 072/276] fixes #2249: sanitize tenant and tenantid

If tenant or tenantid are passed as env variables, we systematically run
them through fmt.Sprint to make sure they are strings, not integers, as
integers would make mapstructure fail.

Signed-off-by: Raphaël Enrici

---
 registry/storage/driver/swift/swift.go | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/registry/storage/driver/swift/swift.go b/registry/storage/driver/swift/swift.go
index 11a33264..60ebea4a 100644
--- a/registry/storage/driver/swift/swift.go
+++ b/registry/storage/driver/swift/swift.go
@@ -142,6 +142,19 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) {
 		InsecureSkipVerify: false,
 	}

+	// Sanitize some entries before trying to decode parameters with mapstructure.
+	// TenantID and Tenant, when they contain only digits and are passed as ENV
+	// variables, are decoded as integers rather than strings. The parser fails
+	// in this case.
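+	// For example (hypothetical value): an operator exporting
+	// REGISTRY_STORAGE_SWIFT_TENANT=123 would surface here as the integer 123,
+	// which mapstructure cannot assign to the driver's string Tenant field;
+	// fmt.Sprint coerces it to the string "123" first.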
+ _, ok := parameters["tenant"] + if ok { + parameters["tenant"] = fmt.Sprint(parameters["tenant"]) + } + _, ok = parameters["tenantid"] + if ok { + parameters["tenantid"] = fmt.Sprint(parameters["tenantid"]) + } + if err := mapstructure.Decode(parameters, ¶ms); err != nil { return nil, err } From e9864ce8b9d5676f23a46d67403bfba6c8a54cc8 Mon Sep 17 00:00:00 2001 From: Viktor Stanchev Date: Mon, 18 Dec 2017 15:06:04 -0800 Subject: [PATCH 073/276] disable schema1 by default, add a config flag to enable it port of #2473 Signed-off-by: Viktor Stanchev --- configuration/configuration.go | 2 ++ .../registry-config-notls.yml | 3 ++ .../tokenserver-oauth/registry-config.yml | 3 ++ .../tokenserver/registry-config.yml | 3 ++ docs/configuration.md | 3 ++ errors.go | 4 +++ notifications/listener_test.go | 2 +- registry/handlers/api_test.go | 4 +++ registry/handlers/app.go | 4 +++ registry/proxy/proxymanifeststore_test.go | 5 +-- registry/storage/catalog_test.go | 4 +-- registry/storage/garbagecollect_test.go | 2 +- registry/storage/manifeststore_test.go | 21 ++++++++++-- registry/storage/registry.go | 34 +++++++++++++++---- registry/storage/v1unsupportedhandler.go | 23 +++++++++++++ 15 files changed, 103 insertions(+), 14 deletions(-) create mode 100644 registry/storage/v1unsupportedhandler.go diff --git a/configuration/configuration.go b/configuration/configuration.go index cdc996b9..ac38eedf 100644 --- a/configuration/configuration.go +++ b/configuration/configuration.go @@ -184,6 +184,8 @@ type Configuration struct { // TrustKey is the signing key to use for adding the signature to // schema1 manifests. TrustKey string `yaml:"signingkeyfile,omitempty"` + // Enabled determines if schema1 manifests should be pullable + Enabled bool `yaml:"enabled,omitempty"` } `yaml:"schema1,omitempty"` } `yaml:"compatibility,omitempty"` diff --git a/contrib/docker-integration/tokenserver-oauth/registry-config-notls.yml b/contrib/docker-integration/tokenserver-oauth/registry-config-notls.yml index ed6b3ea5..a700d08c 100644 --- a/contrib/docker-integration/tokenserver-oauth/registry-config-notls.yml +++ b/contrib/docker-integration/tokenserver-oauth/registry-config-notls.yml @@ -7,6 +7,9 @@ storage: rootdirectory: /tmp/registry-dev http: addr: 0.0.0.0:5000 +compatibility: + schema1: + enabled: true auth: token: realm: "https://auth.localregistry:5559/token/" diff --git a/contrib/docker-integration/tokenserver-oauth/registry-config.yml b/contrib/docker-integration/tokenserver-oauth/registry-config.yml index 630ef057..226798b3 100644 --- a/contrib/docker-integration/tokenserver-oauth/registry-config.yml +++ b/contrib/docker-integration/tokenserver-oauth/registry-config.yml @@ -10,6 +10,9 @@ http: tls: certificate: "/etc/docker/registry/localregistry.cert" key: "/etc/docker/registry/localregistry.key" +compatibility: + schema1: + enabled: true auth: token: realm: "https://auth.localregistry:5559/token/" diff --git a/contrib/docker-integration/tokenserver/registry-config.yml b/contrib/docker-integration/tokenserver/registry-config.yml index bc269056..b9efdd3a 100644 --- a/contrib/docker-integration/tokenserver/registry-config.yml +++ b/contrib/docker-integration/tokenserver/registry-config.yml @@ -10,6 +10,9 @@ http: tls: certificate: "/etc/docker/registry/localregistry.cert" key: "/etc/docker/registry/localregistry.key" +compatibility: + schema1: + enabled: true auth: token: realm: "https://auth.localregistry:5556/token/" diff --git a/docs/configuration.md b/docs/configuration.md index 807353cc..a876b33c 100644 --- 
a/docs/configuration.md +++ b/docs/configuration.md @@ -271,6 +271,7 @@ proxy: compatibility: schema1: signingkeyfile: /etc/registry/key.json + enabled: true validation: manifests: urls: @@ -1028,6 +1029,7 @@ username (such as `batman`) and the password for that username. compatibility: schema1: signingkeyfile: /etc/registry/key.json + enabled: true ``` Use the `compatibility` structure to configure handling of older and deprecated @@ -1038,6 +1040,7 @@ features. Each subsection defines such a feature with configurable behavior. | Parameter | Required | Description | |-----------|----------|-------------------------------------------------------| | `signingkeyfile` | no | The signing private key used to add signatures to `schema1` manifests. If no signing key is provided, a new ECDSA key is generated when the registry starts. | +| `enabled` | no | If this is not set to true, `schema1` manifests cannot be pushed. | ## `validation` diff --git a/errors.go b/errors.go index 020d3325..8e0b788d 100644 --- a/errors.go +++ b/errors.go @@ -20,6 +20,10 @@ var ErrManifestNotModified = errors.New("manifest not modified") // performed var ErrUnsupported = errors.New("operation unsupported") +// ErrSchemaV1Unsupported is returned when a client tries to upload a schema v1 +// manifest but the registry is configured to reject it +var ErrSchemaV1Unsupported = errors.New("manifest schema v1 unsupported") + // ErrTagUnknown is returned if the given tag is not known by the tag service type ErrTagUnknown struct { Tag string diff --git a/notifications/listener_test.go b/notifications/listener_test.go index a5849807..be9e81af 100644 --- a/notifications/listener_test.go +++ b/notifications/listener_test.go @@ -25,7 +25,7 @@ func TestListener(t *testing.T) { t.Fatal(err) } - registry, err := storage.NewRegistry(ctx, inmemory.New(), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableDelete, storage.EnableRedirect, storage.Schema1SigningKey(k)) + registry, err := storage.NewRegistry(ctx, inmemory.New(), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableDelete, storage.EnableRedirect, storage.Schema1SigningKey(k), storage.EnableSchema1) if err != nil { t.Fatalf("error creating registry: %v", err) } diff --git a/registry/handlers/api_test.go b/registry/handlers/api_test.go index 98dcaa71..6b595f97 100644 --- a/registry/handlers/api_test.go +++ b/registry/handlers/api_test.go @@ -2027,6 +2027,7 @@ func newTestEnvMirror(t *testing.T, deleteEnabled bool) *testEnv { RemoteURL: "http://example.com", }, } + config.Compatibility.Schema1.Enabled = true return newTestEnvWithConfig(t, &config) @@ -2043,6 +2044,7 @@ func newTestEnv(t *testing.T, deleteEnabled bool) *testEnv { }, } + config.Compatibility.Schema1.Enabled = true config.HTTP.Headers = headerConfig return newTestEnvWithConfig(t, &config) @@ -2565,6 +2567,7 @@ func TestProxyManifestGetByTag(t *testing.T) { }}, }, } + truthConfig.Compatibility.Schema1.Enabled = true truthConfig.HTTP.Headers = headerConfig imageName, _ := reference.WithName("foo/bar") @@ -2583,6 +2586,7 @@ func TestProxyManifestGetByTag(t *testing.T) { RemoteURL: truthEnv.server.URL, }, } + proxyConfig.Compatibility.Schema1.Enabled = true proxyConfig.HTTP.Headers = headerConfig proxyEnv := newTestEnvWithConfig(t, &proxyConfig) diff --git a/registry/handlers/app.go b/registry/handlers/app.go index e38050cb..9b774aa0 100644 --- a/registry/handlers/app.go +++ b/registry/handlers/app.go @@ -174,6 +174,10 @@ 
func NewApp(ctx context.Context, config *configuration.Configuration) *App { options = append(options, storage.Schema1SigningKey(app.trustKey)) + if config.Compatibility.Schema1.Enabled { + options = append(options, storage.EnableSchema1) + } + if config.HTTP.Host != "" { u, err := url.Parse(config.HTTP.Host) if err != nil { diff --git a/registry/proxy/proxymanifeststore_test.go b/registry/proxy/proxymanifeststore_test.go index 5f3bd34e..b80b303e 100644 --- a/registry/proxy/proxymanifeststore_test.go +++ b/registry/proxy/proxymanifeststore_test.go @@ -95,7 +95,8 @@ func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestE ctx := context.Background() truthRegistry, err := storage.NewRegistry(ctx, inmemory.New(), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), - storage.Schema1SigningKey(k)) + storage.Schema1SigningKey(k), + storage.EnableSchema1) if err != nil { t.Fatalf("error creating registry: %v", err) } @@ -117,7 +118,7 @@ func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestE t.Fatalf(err.Error()) } - localRegistry, err := storage.NewRegistry(ctx, inmemory.New(), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableRedirect, storage.DisableDigestResumption, storage.Schema1SigningKey(k)) + localRegistry, err := storage.NewRegistry(ctx, inmemory.New(), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableRedirect, storage.DisableDigestResumption, storage.Schema1SigningKey(k), storage.EnableSchema1) if err != nil { t.Fatalf("error creating registry: %v", err) } diff --git a/registry/storage/catalog_test.go b/registry/storage/catalog_test.go index 696e024e..45cd2643 100644 --- a/registry/storage/catalog_test.go +++ b/registry/storage/catalog_test.go @@ -26,7 +26,7 @@ type setupEnv struct { func setupFS(t *testing.T) *setupEnv { d := inmemory.New() ctx := context.Background() - registry, err := NewRegistry(ctx, d, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableRedirect) + registry, err := NewRegistry(ctx, d, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableRedirect, EnableSchema1) if err != nil { t.Fatalf("error creating registry: %v", err) } @@ -207,7 +207,7 @@ func testEq(a, b []string, size int) bool { func setupBadWalkEnv(t *testing.T) *setupEnv { d := newBadListDriver() ctx := context.Background() - registry, err := NewRegistry(ctx, d, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableRedirect) + registry, err := NewRegistry(ctx, d, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableRedirect, EnableSchema1) if err != nil { t.Fatalf("error creating registry: %v", err) } diff --git a/registry/storage/garbagecollect_test.go b/registry/storage/garbagecollect_test.go index 2e36fddb..21debc5b 100644 --- a/registry/storage/garbagecollect_test.go +++ b/registry/storage/garbagecollect_test.go @@ -27,7 +27,7 @@ func createRegistry(t *testing.T, driver driver.StorageDriver, options ...Regist if err != nil { t.Fatal(err) } - options = append([]RegistryOption{EnableDelete, Schema1SigningKey(k)}, options...) + options = append([]RegistryOption{EnableDelete, Schema1SigningKey(k), EnableSchema1}, options...) registry, err := NewRegistry(ctx, driver, options...) 
if err != nil { t.Fatalf("Failed to construct namespace") diff --git a/registry/storage/manifeststore_test.go b/registry/storage/manifeststore_test.go index 2fad7611..a22332ba 100644 --- a/registry/storage/manifeststore_test.go +++ b/registry/storage/manifeststore_test.go @@ -56,10 +56,18 @@ func TestManifestStorage(t *testing.T) { if err != nil { t.Fatal(err) } - testManifestStorage(t, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect, Schema1SigningKey(k)) + testManifestStorage(t, true, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect, Schema1SigningKey(k), EnableSchema1) } -func testManifestStorage(t *testing.T, options ...RegistryOption) { +func TestManifestStorageV1Unsupported(t *testing.T) { + k, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + testManifestStorage(t, false, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect, Schema1SigningKey(k)) +} + +func testManifestStorage(t *testing.T, schema1Enabled bool, options ...RegistryOption) { repoName, _ := reference.WithName("foo/bar") env := newManifestStoreTestEnv(t, repoName, "thetag", options...) ctx := context.Background() @@ -111,6 +119,15 @@ func testManifestStorage(t *testing.T, options ...RegistryOption) { t.Fatalf("expected errors putting manifest with full verification") } + // If schema1 is not enabled, do a short version of this test, just checking + // if we get the right error when we Put + if !schema1Enabled { + if err != distribution.ErrSchemaV1Unsupported { + t.Fatalf("got the wrong error when schema1 is disabled: %s", err) + } + return + } + switch err := err.(type) { case distribution.ErrManifestVerification: if len(err) != 2 { diff --git a/registry/storage/registry.go b/registry/storage/registry.go index 46b96853..66149e95 100644 --- a/registry/storage/registry.go +++ b/registry/storage/registry.go @@ -19,6 +19,7 @@ type registry struct { statter *blobStatter // global statter service. blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider deleteEnabled bool + schema1Enabled bool resumableDigestEnabled bool schema1SigningKey libtrust.PrivateKey blobDescriptorServiceFactory distribution.BlobDescriptorServiceFactory @@ -48,6 +49,13 @@ func EnableDelete(registry *registry) error { return nil } +// EnableSchema1 is a functional option for NewRegistry. It enables pushing of +// schema1 manifests. +func EnableSchema1(registry *registry) error { + registry.schema1Enabled = true + return nil +} + // DisableDigestResumption is a functional option for NewRegistry. It should be // used if the registry is acting as a caching proxy. 
func DisableDigestResumption(registry *registry) error {
@@ -237,16 +245,30 @@ func (repo *repository) Manifests(ctx context.Context, options ...distribution.M
 		linkDirectoryPathSpec: manifestDirectoryPathSpec,
 	}

-	ms := &manifestStore{
-		ctx:        ctx,
-		repository: repo,
-		blobStore:  blobStore,
-		schema1Handler: &signedManifestHandler{
+	var v1Handler ManifestHandler
+	if repo.schema1Enabled {
+		v1Handler = &signedManifestHandler{
 			ctx:               ctx,
 			schema1SigningKey: repo.schema1SigningKey,
 			repository:        repo,
 			blobStore:         blobStore,
-		},
+		}
+	} else {
+		v1Handler = &v1UnsupportedHandler{
+			innerHandler: &signedManifestHandler{
+				ctx:               ctx,
+				schema1SigningKey: repo.schema1SigningKey,
+				repository:        repo,
+				blobStore:         blobStore,
+			},
+		}
+	}
+
+	ms := &manifestStore{
+		ctx:            ctx,
+		repository:     repo,
+		blobStore:      blobStore,
+		schema1Handler: v1Handler,
 		schema2Handler: &schema2ManifestHandler{
 			ctx:        ctx,
 			repository: repo,
diff --git a/registry/storage/v1unsupportedhandler.go b/registry/storage/v1unsupportedhandler.go
new file mode 100644
index 00000000..fb6ca14e
--- /dev/null
+++ b/registry/storage/v1unsupportedhandler.go
@@ -0,0 +1,23 @@
+package storage
+
+import (
+	"context"
+
+	"github.com/docker/distribution"
+	digest "github.com/opencontainers/go-digest"
+)
+
+// v1UnsupportedHandler is a ManifestHandler that unmarshals v1 manifests but
+// refuses to Put v1 manifests
+type v1UnsupportedHandler struct {
+	innerHandler ManifestHandler
+}
+
+var _ ManifestHandler = &v1UnsupportedHandler{}
+
+func (v *v1UnsupportedHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) {
+	return v.innerHandler.Unmarshal(ctx, dgst, content)
+}
+func (v *v1UnsupportedHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) {
+	return digest.Digest(""), distribution.ErrSchemaV1Unsupported
+}

From eaa90a3c6d217c0b908506112949ef25e38e70af Mon Sep 17 00:00:00 2001
From: Per Lundberg
Date: Fri, 29 Dec 2017 14:34:32 +0200
Subject: [PATCH 074/276] Update CONTRIBUTING.md

Fixed the headers. They were rendering incorrectly on GitHub for some
weird reason; I copied the strings and rewrote the ## characters at the
beginning and now they render as they should.

Signed-off-by: Per Lundberg

---
 CONTRIBUTING.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index afe4e92a..4c067d9e 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,6 +1,6 @@
 # Contributing to the registry

-## Before reporting an issue...
+## Before reporting an issue...

 ### If your problem is with...
@@ -35,7 +35,7 @@ By following these simple rules you will get better and faster feedback on your - search the bugtracker for an already reported issue -### If you found an issue that describes your problem: +### If you found an issue that describes your problem: - please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments - please refrain from adding "same thing here" or "+1" comments From aac2f6c8b7c5a6c60190848bab5cbeed2b5ba0a9 Mon Sep 17 00:00:00 2001 From: Per Lundberg Date: Fri, 29 Dec 2017 14:40:15 +0200 Subject: [PATCH 075/276] api.md: Fixed incorrect grammar I noted this while reading at https://docs.docker.com/registry/spec/api/ Signed-off-by: Per Lundberg --- docs/spec/api.md | 2 +- docs/spec/api.md.tmpl | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/spec/api.md b/docs/spec/api.md index 0a2fc5e5..0a4009c8 100644 --- a/docs/spec/api.md +++ b/docs/spec/api.md @@ -16,7 +16,7 @@ of this API, known as _Docker Registry HTTP API V2_. While the V1 registry protocol is usable, there are several problems with the architecture that have led to this new version. The main driver of this -specification is a set of changes to the docker the image format, covered in +specification is a set of changes to the Docker image format, covered in [docker/docker#8093](https://github.com/docker/docker/issues/8093). The new, self-contained image manifest simplifies image definition and improves security. This specification will build on that work, leveraging new properties diff --git a/docs/spec/api.md.tmpl b/docs/spec/api.md.tmpl index 79879ad6..3e61ba4c 100644 --- a/docs/spec/api.md.tmpl +++ b/docs/spec/api.md.tmpl @@ -16,7 +16,7 @@ of this API, known as _Docker Registry HTTP API V2_. While the V1 registry protocol is usable, there are several problems with the architecture that have led to this new version. The main driver of this -specification is a set of changes to the docker the image format, covered in +specification is a set of changes to the Docker image format, covered in [docker/docker#8093](https://github.com/docker/docker/issues/8093). The new, self-contained image manifest simplifies image definition and improves security. 
This specification will build on that work, leveraging new properties From ff87ad884c15ac64d1d3c34972ff6b0c040e59a7 Mon Sep 17 00:00:00 2001 From: Jesse Haka Date: Tue, 6 Jun 2017 11:02:47 +0300 Subject: [PATCH 076/276] add possibility to clean untagged manifests add tests add possibility to clean untagged manifests Signed-off-by: Jesse Haka add dry tests Signed-off-by: Jesse Haka remove underscores Signed-off-by: Jesse Haka fixes Signed-off-by: Jesse Haka opts struct+use camelcase Signed-off-by: Jesse Haka doublecheck manifest in paths.go Signed-off-by: Jesse Haka add gofmt Signed-off-by: Jesse Haka fix lint Signed-off-by: Jesse Haka add log print Signed-off-by: Jesse Haka move log to dryrun as well Signed-off-by: Jesse Haka remove counter Signed-off-by: Jesse Haka remove manifest tag references Signed-off-by: Jesse Haka add tag to tests Signed-off-by: Jesse Haka manifestsWithoutTags -> removeUntagged Signed-off-by: Jesse Haka remove RemoveManifestTagReferences and use removemanifests Signed-off-by: Jesse Haka remove comment Signed-off-by: Jesse Haka remove pathfor Signed-off-by: Jesse Haka move removemanifest out of manifestenumerator, it does not work correctly if we delete stuff in it Signed-off-by: Jesse Haka add comment Signed-off-by: Jesse Haka fix context -> dcontext Signed-off-by: Jesse Haka fix gofmt --- registry/root.go | 7 +- registry/storage/garbagecollect.go | 50 ++++++++- registry/storage/garbagecollect_test.go | 133 +++++++++++++++++++++++- registry/storage/vacuum.go | 34 ++++++ 4 files changed, 214 insertions(+), 10 deletions(-) diff --git a/registry/root.go b/registry/root.go index 538139f1..94d22f3c 100644 --- a/registry/root.go +++ b/registry/root.go @@ -18,6 +18,7 @@ func init() { RootCmd.AddCommand(ServeCmd) RootCmd.AddCommand(GCCmd) GCCmd.Flags().BoolVarP(&dryRun, "dry-run", "d", false, "do everything except remove the blobs") + GCCmd.Flags().BoolVarP(&removeUntagged, "delete-untagged", "m", false, "delete manifests that are not currently referenced via tag") RootCmd.Flags().BoolVarP(&showVersion, "version", "v", false, "show the version and exit") } @@ -36,6 +37,7 @@ var RootCmd = &cobra.Command{ } var dryRun bool +var removeUntagged bool // GCCmd is the cobra command that corresponds to the garbage-collect subcommand var GCCmd = &cobra.Command{ @@ -75,7 +77,10 @@ var GCCmd = &cobra.Command{ os.Exit(1) } - err = storage.MarkAndSweep(ctx, driver, registry, dryRun) + err = storage.MarkAndSweep(ctx, driver, registry, storage.GCOpts{ + DryRun: dryRun, + RemoveUntagged: removeUntagged, + }) if err != nil { fmt.Fprintf(os.Stderr, "failed to garbage collect: %v", err) os.Exit(1) diff --git a/registry/storage/garbagecollect.go b/registry/storage/garbagecollect.go index ada48654..48d428a8 100644 --- a/registry/storage/garbagecollect.go +++ b/registry/storage/garbagecollect.go @@ -14,8 +14,21 @@ func emit(format string, a ...interface{}) { fmt.Printf(format+"\n", a...) 
} +// GCOpts contains options for garbage collector +type GCOpts struct { + DryRun bool + RemoveUntagged bool +} + +// ManifestDel contains manifest structure which will be deleted +type ManifestDel struct { + Name string + Digest digest.Digest + Tags []string +} + // MarkAndSweep performs a mark and sweep of registry data -func MarkAndSweep(ctx context.Context, storageDriver driver.StorageDriver, registry distribution.Namespace, dryRun bool) error { +func MarkAndSweep(ctx context.Context, storageDriver driver.StorageDriver, registry distribution.Namespace, opts GCOpts) error { repositoryEnumerator, ok := registry.(distribution.RepositoryEnumerator) if !ok { return fmt.Errorf("unable to convert Namespace to RepositoryEnumerator") @@ -23,6 +36,7 @@ func MarkAndSweep(ctx context.Context, storageDriver driver.StorageDriver, regis // mark markSet := make(map[digest.Digest]struct{}) + manifestArr := make([]ManifestDel, 0) err := repositoryEnumerator.Enumerate(ctx, func(repoName string) error { emit(repoName) @@ -47,6 +61,25 @@ func MarkAndSweep(ctx context.Context, storageDriver driver.StorageDriver, regis } err = manifestEnumerator.Enumerate(ctx, func(dgst digest.Digest) error { + if opts.RemoveUntagged { + // fetch all tags where this manifest is the latest one + tags, err := repository.Tags(ctx).Lookup(ctx, distribution.Descriptor{Digest: dgst}) + if err != nil { + return fmt.Errorf("failed to retrieve tags for digest %v: %v", dgst, err) + } + if len(tags) == 0 { + emit("manifest eligible for deletion: %s", dgst) + // fetch all tags from repository + // all of these tags could contain manifest in history + // which means that we need check (and delete) those references when deleting manifest + allTags, err := repository.Tags(ctx).All(ctx) + if err != nil { + return fmt.Errorf("failed to retrieve tags %v", err) + } + manifestArr = append(manifestArr, ManifestDel{Name: repoName, Digest: dgst, Tags: allTags}) + return nil + } + } // Mark the manifest's blob emit("%s: marking manifest %s ", repoName, dgst) markSet[dgst] = struct{}{} @@ -84,6 +117,15 @@ func MarkAndSweep(ctx context.Context, storageDriver driver.StorageDriver, regis } // sweep + vacuum := NewVacuum(ctx, storageDriver) + if !opts.DryRun { + for _, obj := range manifestArr { + err = vacuum.RemoveManifest(obj.Name, obj.Digest, obj.Tags) + if err != nil { + return fmt.Errorf("failed to delete manifest %s: %v", obj.Digest, err) + } + } + } blobService := registry.Blobs() deleteSet := make(map[digest.Digest]struct{}) err = blobService.Enumerate(ctx, func(dgst digest.Digest) error { @@ -96,12 +138,10 @@ func MarkAndSweep(ctx context.Context, storageDriver driver.StorageDriver, regis if err != nil { return fmt.Errorf("error enumerating blobs: %v", err) } - emit("\n%d blobs marked, %d blobs eligible for deletion", len(markSet), len(deleteSet)) - // Construct vacuum - vacuum := NewVacuum(ctx, storageDriver) + emit("\n%d blobs marked, %d blobs and %d manifests eligible for deletion", len(markSet), len(deleteSet), len(manifestArr)) for dgst := range deleteSet { emit("blob eligible for deletion: %s", dgst) - if dryRun { + if opts.DryRun { continue } err = vacuum.RemoveBlob(string(dgst)) diff --git a/registry/storage/garbagecollect_test.go b/registry/storage/garbagecollect_test.go index 2e36fddb..1694f891 100644 --- a/registry/storage/garbagecollect_test.go +++ b/registry/storage/garbagecollect_test.go @@ -61,6 +61,23 @@ func makeManifestService(t *testing.T, repository distribution.Repository) distr return manifestService } +func 
allManifests(t *testing.T, manifestService distribution.ManifestService) map[digest.Digest]struct{} { + ctx := context.Background() + allManMap := make(map[digest.Digest]struct{}) + manifestEnumerator, ok := manifestService.(distribution.ManifestEnumerator) + if !ok { + t.Fatalf("unable to convert ManifestService into ManifestEnumerator") + } + err := manifestEnumerator.Enumerate(ctx, func(dgst digest.Digest) error { + allManMap[dgst] = struct{}{} + return nil + }) + if err != nil { + t.Fatalf("Error getting all manifests: %v", err) + } + return allManMap +} + func allBlobs(t *testing.T, registry distribution.Namespace) map[digest.Digest]struct{} { ctx := context.Background() blobService := registry.Blobs() @@ -169,7 +186,10 @@ func TestNoDeletionNoEffect(t *testing.T) { before := allBlobs(t, registry) // Run GC - err = MarkAndSweep(context.Background(), inmemoryDriver, registry, false) + err = MarkAndSweep(context.Background(), inmemoryDriver, registry, GCOpts{ + DryRun: false, + RemoveUntagged: false, + }) if err != nil { t.Fatalf("Failed mark and sweep: %v", err) } @@ -180,6 +200,102 @@ func TestNoDeletionNoEffect(t *testing.T) { } } +func TestDeleteManifestIfTagNotFound(t *testing.T) { + ctx := context.Background() + inmemoryDriver := inmemory.New() + + registry := createRegistry(t, inmemoryDriver) + repo := makeRepository(t, registry, "deletemanifests") + manifestService, err := repo.Manifests(ctx) + + // Create random layers + randomLayers1, err := testutil.CreateRandomLayers(3) + if err != nil { + t.Fatalf("failed to make layers: %v", err) + } + + randomLayers2, err := testutil.CreateRandomLayers(3) + if err != nil { + t.Fatalf("failed to make layers: %v", err) + } + + // Upload all layers + err = testutil.UploadBlobs(repo, randomLayers1) + if err != nil { + t.Fatalf("failed to upload layers: %v", err) + } + + err = testutil.UploadBlobs(repo, randomLayers2) + if err != nil { + t.Fatalf("failed to upload layers: %v", err) + } + + // Construct manifests + manifest1, err := testutil.MakeSchema1Manifest(getKeys(randomLayers1)) + if err != nil { + t.Fatalf("failed to make manifest: %v", err) + } + + manifest2, err := testutil.MakeSchema1Manifest(getKeys(randomLayers2)) + if err != nil { + t.Fatalf("failed to make manifest: %v", err) + } + + _, err = manifestService.Put(ctx, manifest1) + if err != nil { + t.Fatalf("manifest upload failed: %v", err) + } + + _, err = manifestService.Put(ctx, manifest2) + if err != nil { + t.Fatalf("manifest upload failed: %v", err) + } + + manifestEnumerator, _ := manifestService.(distribution.ManifestEnumerator) + err = manifestEnumerator.Enumerate(ctx, func(dgst digest.Digest) error { + repo.Tags(ctx).Tag(ctx, "test", distribution.Descriptor{Digest: dgst}) + return nil + }) + + before1 := allBlobs(t, registry) + before2 := allManifests(t, manifestService) + + // run GC with dry-run (should not remove anything) + err = MarkAndSweep(context.Background(), inmemoryDriver, registry, GCOpts{ + DryRun: true, + RemoveUntagged: true, + }) + if err != nil { + t.Fatalf("Failed mark and sweep: %v", err) + } + afterDry1 := allBlobs(t, registry) + afterDry2 := allManifests(t, manifestService) + if len(before1) != len(afterDry1) { + t.Fatalf("Garbage collection affected blobs storage: %d != %d", len(before1), len(afterDry1)) + } + if len(before2) != len(afterDry2) { + t.Fatalf("Garbage collection affected manifest storage: %d != %d", len(before2), len(afterDry2)) + } + + // Run GC (removes everything because no manifests with tags exist) + err = 
MarkAndSweep(context.Background(), inmemoryDriver, registry, GCOpts{
+		DryRun:         false,
+		RemoveUntagged: true,
+	})
+	if err != nil {
+		t.Fatalf("Failed mark and sweep: %v", err)
+	}
+
+	after1 := allBlobs(t, registry)
+	after2 := allManifests(t, manifestService)
+	if len(before1) == len(after1) {
+		t.Fatalf("Garbage collection did not affect blobs storage: %d == %d", len(before1), len(after1))
+	}
+	if len(before2) == len(after2) {
+		t.Fatalf("Garbage collection did not affect manifest storage: %d == %d", len(before2), len(after2))
+	}
+}
+
 func TestGCWithMissingManifests(t *testing.T) {
 	ctx := context.Background()
 	d := inmemory.New()
@@ -200,7 +316,10 @@ func TestGCWithMissingManifests(t *testing.T) {
 		t.Fatal(err)
 	}

-	err = MarkAndSweep(context.Background(), d, registry, false)
+	err = MarkAndSweep(context.Background(), d, registry, GCOpts{
+		DryRun:         false,
+		RemoveUntagged: false,
+	})
 	if err != nil {
 		t.Fatalf("Failed mark and sweep: %v", err)
 	}
@@ -227,7 +346,10 @@ func TestDeletionHasEffect(t *testing.T) {
 	manifests.Delete(ctx, image3.manifestDigest)

 	// Run GC
-	err = MarkAndSweep(context.Background(), inmemoryDriver, registry, false)
+	err = MarkAndSweep(context.Background(), inmemoryDriver, registry, GCOpts{
+		DryRun:         false,
+		RemoveUntagged: false,
+	})
 	if err != nil {
 		t.Fatalf("Failed mark and sweep: %v", err)
 	}
@@ -361,7 +483,10 @@ func TestOrphanBlobDeleted(t *testing.T) {
 	uploadRandomSchema2Image(t, repo)

 	// Run GC
-	err = MarkAndSweep(context.Background(), inmemoryDriver, registry, false)
+	err = MarkAndSweep(context.Background(), inmemoryDriver, registry, GCOpts{
+		DryRun:         false,
+		RemoveUntagged: false,
+	})
 	if err != nil {
 		t.Fatalf("Failed mark and sweep: %v", err)
 	}
diff --git a/registry/storage/vacuum.go b/registry/storage/vacuum.go
index d2ebc539..a43db17a 100644
--- a/registry/storage/vacuum.go
+++ b/registry/storage/vacuum.go
@@ -50,6 +50,40 @@ func (v Vacuum) RemoveBlob(dgst string) error {
 	return nil
 }

+// RemoveManifest removes a manifest from the filesystem
+func (v Vacuum) RemoveManifest(name string, dgst digest.Digest, tags []string) error {
+	// Remove the tag manifest references; if one is not found, continue to the next.
+	for _, tag := range tags {
+
+		tagsPath, err := pathFor(manifestTagIndexEntryPathSpec{name: name, revision: dgst, tag: tag})
+		if err != nil {
+			return err
+		}
+
+		_, err = v.driver.Stat(v.ctx, tagsPath)
+		if err != nil {
+			switch err := err.(type) {
+			case driver.PathNotFoundError:
+				continue
+			default:
+				return err
+			}
+		}
+		dcontext.GetLogger(v.ctx).Infof("deleting manifest tag reference: %s", tagsPath)
+		err = v.driver.Delete(v.ctx, tagsPath)
+		if err != nil {
+			return err
+		}
+	}
+
+	manifestPath, err := pathFor(manifestRevisionPathSpec{name: name, revision: dgst})
+	if err != nil {
+		return err
+	}
+	dcontext.GetLogger(v.ctx).Infof("deleting manifest: %s", manifestPath)
+	return v.driver.Delete(v.ctx, manifestPath)
+}
+
 // RemoveRepository removes a repository directory from the
 // filesystem
 func (v Vacuum) RemoveRepository(repoName string) error {

From 9f664468ea69f973a0a8fc3e60962a4cae679067 Mon Sep 17 00:00:00 2001
From: Sargun Dhillon
Date: Wed, 29 Nov 2017 14:20:27 -0800
Subject: [PATCH 077/276] Upgrade AWS library to v1.12.36

This upgrades and vendors aws-sdk-go to version v1.12.36.
This is because it has new API calls accessible to the S3 client, specifically S3.ListObjectsV2PagesWithContext Signed-off-by: Sargun Dhillon --- vendor.conf | 2 +- vendor/github.com/aws/aws-sdk-go/README.md | 460 +- .../aws/aws-sdk-go/aws/client/client.go | 69 +- .../aws-sdk-go/aws/client/default_retryer.go | 68 +- .../aws/aws-sdk-go/aws/client/logger.go | 108 + .../github.com/aws/aws-sdk-go/aws/config.go | 34 +- .../github.com/aws/aws-sdk-go/aws/context.go | 71 + .../aws/aws-sdk-go/aws/context_1_6.go | 41 + .../aws/aws-sdk-go/aws/context_1_7.go | 9 + .../aws/aws-sdk-go/aws/convert_types.go | 18 + .../aws-sdk-go/aws/corehandlers/handlers.go | 132 +- .../aws/credentials/chain_provider.go | 14 +- .../aws-sdk-go/aws/credentials/credentials.go | 33 +- .../aws/credentials/env_provider.go | 1 + .../shared_credentials_provider.go | 33 +- .../stscreds/assume_role_provider.go | 159 +- .../aws/aws-sdk-go/aws/defaults/defaults.go | 47 +- .../aws-sdk-go/aws/defaults/shared_config.go | 27 + vendor/github.com/aws/aws-sdk-go/aws/doc.go | 56 + .../aws/aws-sdk-go/aws/endpoints/defaults.go | 605 +- .../aws/aws-sdk-go/aws/endpoints/doc.go | 4 +- .../aws/aws-sdk-go/aws/endpoints/endpoints.go | 146 +- .../aws/aws-sdk-go/aws/endpoints/v3model.go | 4 +- .../aws/endpoints/v3model_codegen.go | 19 +- .../aws/aws-sdk-go/aws/jsonvalue.go | 12 + .../github.com/aws/aws-sdk-go/aws/logger.go | 4 +- .../aws/request/connection_reset_error.go | 19 + .../request/connection_reset_error_other.go | 11 + .../aws/aws-sdk-go/aws/request/handlers.go | 97 +- .../aws/aws-sdk-go/aws/request/request.go | 336 +- .../aws/aws-sdk-go/aws/request/request_1_7.go | 22 +- .../aws/aws-sdk-go/aws/request/request_1_8.go | 30 +- .../aws-sdk-go/aws/request/request_context.go | 14 + .../aws/request/request_context_1_6.go | 14 + .../aws/request/request_pagination.go | 154 +- .../aws/aws-sdk-go/aws/request/retryer.go | 99 +- .../aws/request/timeout_read_closer.go | 94 + .../aws/aws-sdk-go/aws/request/validation.go | 2 +- .../aws/aws-sdk-go/aws/request/waiter.go | 295 + .../aws/aws-sdk-go/aws/session/doc.go | 98 +- .../aws/aws-sdk-go/aws/session/env_config.go | 61 +- .../aws/aws-sdk-go/aws/session/session.go | 242 +- .../aws-sdk-go/aws/session/shared_config.go | 2 +- .../aws/aws-sdk-go/aws/signer/v4/options.go | 7 + .../aws/aws-sdk-go/aws/signer/v4/v4.go | 127 +- vendor/github.com/aws/aws-sdk-go/aws/types.go | 2 +- vendor/github.com/aws/aws-sdk-go/aws/url.go | 12 + .../github.com/aws/aws-sdk-go/aws/url_1_7.go | 29 + .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../internal/shareddefaults/shared_config.go | 40 + .../aws-sdk-go/private/protocol/jsonvalue.go | 76 + .../protocol/query/queryutil/queryutil.go | 11 +- .../aws-sdk-go/private/protocol/rest/build.go | 51 +- .../private/protocol/rest/unmarshal.go | 23 +- .../private/protocol/xml/xmlutil/build.go | 1 - .../private/protocol/xml/xmlutil/unmarshal.go | 7 +- .../protocol/xml/xmlutil/xml_to_struct.go | 13 +- .../aws/aws-sdk-go/private/waiter/waiter.go | 134 - .../service/cloudfront/sign/sign_cookie.go | 6 +- .../aws/aws-sdk-go/service/s3/api.go | 5299 ++++++++++++++--- .../aws-sdk-go/service/s3/bucket_location.go | 65 +- .../aws-sdk-go/service/s3/customizations.go | 18 + .../aws/aws-sdk-go/service/s3/doc.go | 26 + .../aws/aws-sdk-go/service/s3/doc_custom.go | 109 + .../aws/aws-sdk-go/service/s3/errors.go | 48 + .../service/s3/host_style_bucket.go | 13 +- .../aws/aws-sdk-go/service/s3/service.go | 12 +- .../aws/aws-sdk-go/service/s3/sse.go | 18 +- .../aws-sdk-go/service/s3/unmarshal_error.go | 
64 +- .../aws/aws-sdk-go/service/s3/waiters.go | 215 +- .../aws/aws-sdk-go/service/sts/api.go | 318 +- .../aws/aws-sdk-go/service/sts/doc.go | 72 + .../aws/aws-sdk-go/service/sts/errors.go | 73 + .../aws/aws-sdk-go/service/sts/service.go | 54 +- 74 files changed, 8738 insertions(+), 1973 deletions(-) create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/logger.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/url.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/errors.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/errors.go diff --git a/vendor.conf b/vendor.conf index d67edd77..7bea159c 100644 --- a/vendor.conf +++ b/vendor.conf @@ -1,7 +1,7 @@ github.com/Azure/azure-sdk-for-go 088007b3b08cc02b27f2eadfdcd870958460ce7e github.com/Azure/go-autorest ec5f4903f77ed9927ac95b19ab8e44ada64c1356 github.com/sirupsen/logrus 3d4380f53a34dcdc95f0c1db702615992b38d9a4 -github.com/aws/aws-sdk-go c6fc52983ea2375810aa38ddb5370e9cdf611716 +github.com/aws/aws-sdk-go 5bcc0a238d880469f949fc7cd24e35f32ab80cbd github.com/bshuster-repo/logrus-logstash-hook d2c0ecc1836d91814e15e23bb5dc309c3ef51f4a github.com/bugsnag/bugsnag-go b1d153021fcd90ca3f080db36bec96dc690fb274 github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702 diff --git a/vendor/github.com/aws/aws-sdk-go/README.md b/vendor/github.com/aws/aws-sdk-go/README.md index df806f40..d8acfdd0 100644 --- a/vendor/github.com/aws/aws-sdk-go/README.md +++ b/vendor/github.com/aws/aws-sdk-go/README.md @@ -1,11 +1,6 @@ -# AWS SDK for Go +[![API Reference](http://img.shields.io/badge/api-reference-blue.svg)](http://docs.aws.amazon.com/sdk-for-go/api) [![Join the chat at https://gitter.im/aws/aws-sdk-go](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/aws/aws-sdk-go?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![Build Status](https://img.shields.io/travis/aws/aws-sdk-go.svg)](https://travis-ci.org/aws/aws-sdk-go) [![Apache V2 
License](http://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt) - -[![API Reference](http://img.shields.io/badge/api-reference-blue.svg)](http://docs.aws.amazon.com/sdk-for-go/api) -[![Join the chat at https://gitter.im/aws/aws-sdk-go](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/aws/aws-sdk-go?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) -[![Build Status](https://img.shields.io/travis/aws/aws-sdk-go.svg)](https://travis-ci.org/aws/aws-sdk-go) -[![Apache V2 License](http://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt) - +# AWS SDK for Go aws-sdk-go is the official AWS SDK for the Go programming language. @@ -30,7 +25,23 @@ These two processes will still include the `vendor` folder and it should be dele rm -rf $GOPATH/src/github.com/aws/aws-sdk-go/vendor +## Getting Help + +Please use these community resources for getting help. We use the GitHub issues for tracking bugs and feature requests. + +* Ask a question on [StackOverflow](http://stackoverflow.com/) and tag it with the [`aws-sdk-go`](http://stackoverflow.com/questions/tagged/aws-sdk-go) tag. +* Come join the AWS SDK for Go community chat on [gitter](https://gitter.im/aws/aws-sdk-go). +* Open a support ticket with [AWS Support](http://docs.aws.amazon.com/awssupport/latest/user/getting-started.html). +* If you think you may have found a bug, please open an [issue](https://github.com/aws/aws-sdk-go/issues/new). + +## Opening Issues + +If you encounter a bug with the AWS SDK for Go we would like to hear about it. Search the [existing issues](https://github.com/aws/aws-sdk-go/issues) and see if others are also experiencing the issue before opening a new issue. Please include the version of AWS SDK for Go, Go language, and OS you’re using. Please also include repro case when appropriate. + +The GitHub issues are intended for bug reports and feature requests. For help and questions with using AWS SDK for GO please make use of the resources listed in the [Getting Help](https://github.com/aws/aws-sdk-go#getting-help) section. Keeping the list of open issues lean will help us respond in a timely manner. + ## Reference Documentation + [`Getting Started Guide`](https://aws.amazon.com/sdk-for-go/) - This document is a general introduction how to configure and make requests with the SDK. If this is your first time using the SDK, this documentation and the API documentation will help you get started. This document focuses on the syntax and behavior of the SDK. The [Service Developer Guide](https://aws.amazon.com/documentation/) will help you get started using specific AWS services. [`SDK API Reference Documentation`](https://docs.aws.amazon.com/sdk-for-go/api/) - Use this document to look up all API operation input and output parameters for AWS services supported by the SDK. The API reference also includes documentation of the SDK, and examples how to using the SDK, service client API operations, and API operation require parameters. @@ -39,80 +50,397 @@ These two processes will still include the `vendor` folder and it should be dele [`SDK Examples`](https://github.com/aws/aws-sdk-go/tree/master/example) - Included in the SDK's repo are a several hand crafted examples using the SDK features and AWS services. -## Configuring Credentials +## Overview of SDK's Packages -Before using the SDK, ensure that you've configured credentials. 
The best -way to configure credentials on a development machine is to use the -`~/.aws/credentials` file, which might look like: +The SDK is composed of two main components, SDK core, and service clients. +The SDK core packages are all available under the aws package at the root of +the SDK. Each client for a supported AWS service is available within its own +package under the service folder at the root of the SDK. -``` -[default] -aws_access_key_id = AKID1234567890 -aws_secret_access_key = MY-SECRET-KEY -``` + * aws - SDK core, provides common shared types such as Config, Logger, + and utilities to make working with API parameters easier. -You can learn more about the credentials file from this -[blog post](http://blogs.aws.amazon.com/security/post/Tx3D6U6WSFGOK2H/A-New-and-Standardized-Way-to-Manage-Credentials-in-the-AWS-SDKs). + * awserr - Provides the error interface that the SDK will use for all + errors that occur in the SDK's processing. This includes service API + response errors as well. The Error type is made up of a code and message. + Cast the SDK's returned error type to awserr.Error and call the Code + method to compare returned error to specific error codes. See the package's + documentation for additional values that can be extracted such as RequestID. -Alternatively, you can set the following environment variables: + * credentials - Provides the types and built in credentials providers + the SDK will use to retrieve AWS credentials to make API requests with. + Nested under this folder are also additional credentials providers such as + stscreds for assuming IAM roles, and ec2rolecreds for EC2 Instance roles. -``` -AWS_ACCESS_KEY_ID=AKID1234567890 -AWS_SECRET_ACCESS_KEY=MY-SECRET-KEY -``` + * endpoints - Provides the AWS Regions and Endpoints metadata for the SDK. + Use this to lookup AWS service endpoint information such as which services + are in a region, and what regions a service is in. Constants are also provided + for all region identifiers, e.g UsWest2RegionID for "us-west-2". -### AWS shared config file (`~/.aws/config`) -The AWS SDK for Go added support the shared config file in release [v1.3.0](https://github.com/aws/aws-sdk-go/releases/tag/v1.3.0). You can opt into enabling support for the shared config by setting the environment variable `AWS_SDK_LOAD_CONFIG` to a truthy value. See the [Session](https://github.com/aws/aws-sdk-go/wiki/sessions) wiki for more information about this feature. + * session - Provides initial default configuration, and load + configuration from external sources such as environment and shared + credentials file. -## Using the Go SDK + * request - Provides the API request sending, and retry logic for the SDK. + This package also includes utilities for defining your own request + retryer, and configuring how the SDK processes the request. -To use a service in the SDK, create a service variable by calling the `New()` -function. Once you have a service client, you can call API operations which each -return response data and a possible error. + * service - Clients for AWS services. All services supported by the SDK are + available under this folder. -To list a set of instance IDs from EC2, you could run: +## How to Use the SDK's AWS Service Clients + +The SDK includes the Go types and utilities you can use to make requests to +AWS service APIs. Within the service folder at the root of the SDK you'll find +a package for each AWS service the SDK supports. All service clients follows +a common pattern of creation and usage. 
+ +When creating a client for an AWS service you'll first need to have a Session +value constructed. The Session provides shared configuration that can be shared +between your service clients. When service clients are created you can pass +in additional configuration via the aws.Config type to override configuration +provided by in the Session to create service client instances with custom +configuration. + +Once the service's client is created you can use it to make API requests the +AWS service. These clients are safe to use concurrently. + +## Configuring the SDK + +In the AWS SDK for Go, you can configure settings for service clients, such +as the log level and maximum number of retries. Most settings are optional; +however, for each service client, you must specify a region and your credentials. +The SDK uses these values to send requests to the correct AWS region and sign +requests with the correct credentials. You can specify these values as part +of a session or as environment variables. + +See the SDK's [configuration guide][config_guide] for more information. + +See the [session][session_pkg] package documentation for more information on how to use Session +with the SDK. + +See the [Config][config_typ] type in the [aws][aws_pkg] package for more information on configuration +options. + +[config_guide]: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html +[session_pkg]: https://docs.aws.amazon.com/sdk-for-go/api/aws/session/ +[config_typ]: https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +[aws_pkg]: https://docs.aws.amazon.com/sdk-for-go/api/aws/ + +### Configuring Credentials + +When using the SDK you'll generally need your AWS credentials to authenticate +with AWS services. The SDK supports multiple methods of supporting these +credentials. By default the SDK will source credentials automatically from +its default credential chain. See the session package for more information +on this chain, and how to configure it. The common items in the credential +chain are the following: + + * Environment Credentials - Set of environment variables that are useful + when sub processes are created for specific roles. + + * Shared Credentials file (~/.aws/credentials) - This file stores your + credentials based on a profile name and is useful for local development. + + * EC2 Instance Role Credentials - Use EC2 Instance Role to assign credentials + to application running on an EC2 instance. This removes the need to manage + credential files in production. + +Credentials can be configured in code as well by setting the Config's Credentials +value to a custom provider or using one of the providers included with the +SDK to bypass the default credential chain and use a custom one. This is +helpful when you want to instruct the SDK to only use a specific set of +credentials or providers. + +This example creates a credential provider for assuming an IAM role, "myRoleARN" +and configures the S3 service client to use that role for API requests. ```go -package main + // Initial credentials loaded from SDK's default credential chain. Such as + // the environment, shared credentials (~/.aws/credentials), or EC2 Instance + // Role. These credentials will be used to to make the STS Assume Role API. + sess := session.Must(session.NewSession()) -import ( - "fmt" + // Create the credentials from AssumeRoleProvider to assume the role + // referenced by the "myRoleARN" ARN. 
+ creds := stscreds.NewCredentials(sess, "myRoleArn") - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/ec2" -) - -func main() { - sess, err := session.NewSession() - if err != nil { - panic(err) - } - - // Create an EC2 service object in the "us-west-2" region - // Note that you can also configure your region globally by - // exporting the AWS_REGION environment variable - svc := ec2.New(sess, &aws.Config{Region: aws.String("us-west-2")}) - - // Call the DescribeInstances Operation - resp, err := svc.DescribeInstances(nil) - if err != nil { - panic(err) - } - - // resp has all of the response data, pull out instance IDs: - fmt.Println("> Number of reservation sets: ", len(resp.Reservations)) - for idx, res := range resp.Reservations { - fmt.Println(" > Number of instances: ", len(res.Instances)) - for _, inst := range resp.Reservations[idx].Instances { - fmt.Println(" - Instance ID: ", *inst.InstanceId) - } - } -} + // Create service client value configured for credentials + // from assumed role. + svc := s3.New(sess, &aws.Config{Credentials: creds}) ``` -You can find more information and operations in our -[API documentation](http://docs.aws.amazon.com/sdk-for-go/api/). +See the [credentials][credentials_pkg] package documentation for more information on credential +providers included with the SDK, and how to customize the SDK's usage of +credentials. + +The SDK has support for the shared configuration file (~/.aws/config). This +support can be enabled by setting the environment variable, "AWS_SDK_LOAD_CONFIG=1", +or enabling the feature in code when creating a Session via the +Option's SharedConfigState parameter. + +```go + sess := session.Must(session.NewSessionWithOptions(session.Options{ + SharedConfigState: session.SharedConfigEnable, + })) +``` + +[credentials_pkg]: https://docs.aws.amazon.com/sdk-for-go/api/aws/credentials + +### Configuring AWS Region + +In addition to the credentials you'll need to specify the region the SDK +will use to make AWS API requests to. In the SDK you can specify the region +either with an environment variable, or directly in code when a Session or +service client is created. The last value specified in code wins if the region +is specified multiple ways. + +To set the region via the environment variable set the "AWS_REGION" to the +region you want to the SDK to use. Using this method to set the region will +allow you to run your application in multiple regions without needing additional +code in the application to select the region. + + AWS_REGION=us-west-2 + +The endpoints package includes constants for all regions the SDK knows. The +values are all suffixed with RegionID. These values are helpful, because they +reduce the need to type the region string manually. + +To set the region on a Session use the aws package's Config struct parameter +Region to the AWS region you want the service clients created from the session to +use. This is helpful when you want to create multiple service clients, and +all of the clients make API requests to the same region. + +```go + sess := session.Must(session.NewSession(&aws.Config{ + Region: aws.String(endpoints.UsWest2RegionID), + })) +``` + +See the [endpoints][endpoints_pkg] package for the AWS Regions and Endpoints metadata. + +In addition to setting the region when creating a Session you can also set +the region on a per service client bases. This overrides the region of a +Session. 
This is helpful when you want to create service clients in specific +regions different from the Session's region. + +```go + svc := s3.New(sess, &aws.Config{ + Region: aws.String(endpoints.UsWest2RegionID), + }) +``` + +See the [Config][config_typ] type in the [aws][aws_pkg] package for more information and additional +options such as setting the Endpoint, and other service client configuration options. + +[endpoints_pkg]: https://docs.aws.amazon.com/sdk-for-go/api/aws/endpoints/ + +## Making API Requests + +Once the client is created you can make an API request to the service. +Each API method takes a input parameter, and returns the service response +and an error. The SDK provides methods for making the API call in multiple ways. + +In this list we'll use the S3 ListObjects API as an example for the different +ways of making API requests. + + * ListObjects - Base API operation that will make the API request to the service. + + * ListObjectsRequest - API methods suffixed with Request will construct the + API request, but not send it. This is also helpful when you want to get a + presigned URL for a request, and share the presigned URL instead of your + application making the request directly. + + * ListObjectsPages - Same as the base API operation, but uses a callback to + automatically handle pagination of the API's response. + + * ListObjectsWithContext - Same as base API operation, but adds support for + the Context pattern. This is helpful for controlling the canceling of in + flight requests. See the Go standard library context package for more + information. This method also takes request package's Option functional + options as the variadic argument for modifying how the request will be + made, or extracting information from the raw HTTP response. + + * ListObjectsPagesWithContext - same as ListObjectsPages, but adds support for + the Context pattern. Similar to ListObjectsWithContext this method also + takes the request package's Option function option types as the variadic + argument. + +In addition to the API operations the SDK also includes several higher level +methods that abstract checking for and waiting for an AWS resource to be in +a desired state. In this list we'll use WaitUntilBucketExists to demonstrate +the different forms of waiters. + + * WaitUntilBucketExists. - Method to make API request to query an AWS service for + a resource's state. Will return successfully when that state is accomplished. + + * WaitUntilBucketExistsWithContext - Same as WaitUntilBucketExists, but adds + support for the Context pattern. In addition these methods take request + package's WaiterOptions to configure the waiter, and how underlying request + will be made by the SDK. + +The API method will document which error codes the service might return for +the operation. These errors will also be available as const strings prefixed +with "ErrCode" in the service client's package. If there are no errors listed +in the API's SDK documentation you'll need to consult the AWS service's API +documentation for the errors that could be returned. + +```go + ctx := context.Background() + + result, err := svc.GetObjectWithContext(ctx, &s3.GetObjectInput{ + Bucket: aws.String("my-bucket"), + Key: aws.String("my-key"), + }) + if err != nil { + // Cast err to awserr.Error to handle specific error codes. 
+
+In addition to the API operations, the SDK also includes several higher level
+methods that abstract checking for, and waiting for, an AWS resource to be in
+a desired state. In this list we'll use WaitUntilBucketExists to demonstrate
+the different forms of waiters.
+
+  * WaitUntilBucketExists - Method to make an API request to query an AWS
+    service for a resource's state. Returns successfully when the desired
+    state is reached.
+
+  * WaitUntilBucketExistsWithContext - Same as WaitUntilBucketExists, but
+    adds support for the Context pattern. In addition these methods take the
+    request package's WaiterOptions to configure the waiter, and how the
+    underlying requests will be made by the SDK.
+
+The API method will document which error codes the service might return for
+the operation. These errors will also be available as const strings prefixed
+with "ErrCode" in the service client's package. If there are no errors listed
+in the API's SDK documentation you'll need to consult the AWS service's API
+documentation for the errors that could be returned.
+
+```go
+  ctx := context.Background()
+
+  result, err := svc.GetObjectWithContext(ctx, &s3.GetObjectInput{
+    Bucket: aws.String("my-bucket"),
+    Key:    aws.String("my-key"),
+  })
+  if err != nil {
+    // Cast err to awserr.Error to handle specific error codes.
+    aerr, ok := err.(awserr.Error)
+    if ok && aerr.Code() == s3.ErrCodeNoSuchKey {
+      // Specific error code handling
+    }
+    return err
+  }
+
+  // Make sure to close the body when done with it for S3 GetObject APIs or
+  // the connection will be leaked.
+  defer result.Body.Close()
+
+  fmt.Println("Object Size:", aws.Int64Value(result.ContentLength))
+```
+
+### API Request Pagination and Resource Waiters
+
+Pagination helper methods are suffixed with "Pages", and provide the
+functionality needed to round trip API page requests. Pagination methods
+take a callback function that will be called for each page of the API's
+response.
+
+```go
+  objects := []string{}
+  err := svc.ListObjectsPagesWithContext(ctx, &s3.ListObjectsInput{
+    Bucket: aws.String(myBucket),
+  }, func(p *s3.ListObjectsOutput, lastPage bool) bool {
+    for _, o := range p.Contents {
+      objects = append(objects, aws.StringValue(o.Key))
+    }
+    return true // continue paging
+  })
+  if err != nil {
+    panic(fmt.Sprintf("failed to list objects for bucket, %s, %v", myBucket, err))
+  }
+
+  fmt.Println("Objects in bucket:", objects)
+```
+
+Waiter helper methods provide the functionality to wait for an AWS resource
+state. These methods abstract the logic needed to check the state of an
+AWS resource, and wait until that resource is in a desired state. The waiter
+will block until the resource is in the state that is desired, an error
+occurs, or the waiter times out. If the waiter times out, the error returned
+will have the code request.WaiterResourceNotReadyErrorCode.
+
+```go
+  err := svc.WaitUntilBucketExistsWithContext(ctx, &s3.HeadBucketInput{
+    Bucket: aws.String(myBucket),
+  })
+  if err != nil {
+    aerr, ok := err.(awserr.Error)
+    if ok && aerr.Code() == request.WaiterResourceNotReadyErrorCode {
+      fmt.Fprintf(os.Stderr, "timed out while waiting for bucket to exist")
+    }
+    panic(fmt.Errorf("failed to wait for bucket to exist, %v", err))
+  }
+  fmt.Println("Bucket", myBucket, "exists")
+```
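+
+The waiter's polling behavior can also be tuned with the request package's
+WaiterOption values mentioned above. A sketch, with arbitrary option values:
+
+```go
+  err := svc.WaitUntilBucketExistsWithContext(ctx, &s3.HeadBucketInput{
+    Bucket: aws.String(myBucket),
+  },
+    // Give up after five checks instead of the waiter's default.
+    request.WithWaiterMaxAttempts(5),
+    // Poll at a constant ten second interval.
+    request.WithWaiterDelay(request.ConstantWaiterDelay(10*time.Second)),
+  )
+```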
+
+## Complete SDK Example
+
+This example shows a complete working Go file which will upload a file to S3
+and use the Context pattern to implement timeout logic that will cancel the
+request if it takes too long. This example highlights how to use sessions,
+create a service client, make a request, handle the error, and process the
+response.
+
+```go
+  package main
+
+  import (
+    "context"
+    "flag"
+    "fmt"
+    "os"
+    "time"
+
+    "github.com/aws/aws-sdk-go/aws"
+    "github.com/aws/aws-sdk-go/aws/awserr"
+    "github.com/aws/aws-sdk-go/aws/request"
+    "github.com/aws/aws-sdk-go/aws/session"
+    "github.com/aws/aws-sdk-go/service/s3"
+  )
+
+  // Uploads a file to S3 given a bucket and object key. Also takes a duration
+  // value to terminate the upload if it doesn't complete within that time.
+  //
+  // The AWS Region needs to be provided in the AWS shared config or via the
+  // `AWS_REGION` environment variable. Credentials must also be provided, and
+  // will be loaded from the shared credentials file or from the environment.
+  //
+  // Usage:
+  //   # Upload myfile.txt to myBucket/myKey. Must complete within 10 minutes or will fail
+  //   go run withContext.go -b mybucket -k myKey -d 10m < myfile.txt
+  func main() {
+    var bucket, key string
+    var timeout time.Duration
+
+    flag.StringVar(&bucket, "b", "", "Bucket name.")
+    flag.StringVar(&key, "k", "", "Object key name.")
+    flag.DurationVar(&timeout, "d", 0, "Upload timeout.")
+    flag.Parse()
+
+    // All clients require a Session. The Session provides the client with
+    // shared configuration such as region, endpoint, and credentials. A
+    // Session should be shared where possible to take advantage of
+    // configuration and credential caching. See the session package for
+    // more information.
+    sess := session.Must(session.NewSession())
+
+    // Create a new instance of the service's client with a Session.
+    // Optional aws.Config values can also be provided as variadic arguments
+    // to the New function. This option allows you to provide service
+    // specific configuration.
+    svc := s3.New(sess)
+
+    // Create a context with a timeout that will abort the upload if it takes
+    // more than the passed in timeout.
+    ctx := context.Background()
+    var cancelFn func()
+    if timeout > 0 {
+      ctx, cancelFn = context.WithTimeout(ctx, timeout)
+    }
+    // Ensure the context is canceled to prevent leaking. Guard the call, as
+    // cancelFn is nil when no timeout was set.
+    // See context package for more information, https://golang.org/pkg/context/
+    if cancelFn != nil {
+      defer cancelFn()
+    }
+
+    // Uploads the object to S3. The Context will interrupt the request if the
+    // timeout expires.
+    _, err := svc.PutObjectWithContext(ctx, &s3.PutObjectInput{
+      Bucket: aws.String(bucket),
+      Key:    aws.String(key),
+      Body:   os.Stdin,
+    })
+    if err != nil {
+      if aerr, ok := err.(awserr.Error); ok && aerr.Code() == request.CanceledErrorCode {
+        // If the SDK can determine the request or retry delay was canceled
+        // by a context the CanceledErrorCode error code will be returned.
+        fmt.Fprintf(os.Stderr, "upload canceled due to timeout, %v\n", err)
+      } else {
+        fmt.Fprintf(os.Stderr, "failed to upload object, %v\n", err)
+      }
+      os.Exit(1)
+    }
+
+    fmt.Printf("successfully uploaded file to %s/%s\n", bucket, key)
+  }
+```

## License

diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
index aeeada0d..788fe6e2 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
@@ -2,7 +2,6 @@ package client

 import (
 	"fmt"
-	"net/http/httputil"

 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/client/metadata"
@@ -24,6 +23,13 @@ type ConfigProvider interface {
 	ClientConfig(serviceName string, cfgs ...*aws.Config) Config
 }

+// ConfigNoResolveEndpointProvider is the same as ConfigProvider, except it
+// will not resolve the endpoint automatically. The service client's endpoint
+// must be provided via the aws.Config.Endpoint field.
+type ConfigNoResolveEndpointProvider interface {
+	ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) Config
+}
+
 // A Client implements the base client request and response handling
 // used by all service clients.
type Client struct { @@ -39,7 +45,7 @@ func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, op svc := &Client{ Config: cfg, ClientInfo: info, - Handlers: handlers, + Handlers: handlers.Copy(), } switch retryer, ok := cfg.Retryer.(request.Retryer); { @@ -79,61 +85,6 @@ func (c *Client) AddDebugHandlers() { return } - c.Handlers.Send.PushFront(logRequest) - c.Handlers.Send.PushBack(logResponse) -} - -const logReqMsg = `DEBUG: Request %s/%s Details: ----[ REQUEST POST-SIGN ]----------------------------- -%s ------------------------------------------------------` - -const logReqErrMsg = `DEBUG ERROR: Request %s/%s: ----[ REQUEST DUMP ERROR ]----------------------------- -%s ------------------------------------------------------` - -func logRequest(r *request.Request) { - logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) - dumpedBody, err := httputil.DumpRequestOut(r.HTTPRequest, logBody) - if err != nil { - r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err)) - return - } - - if logBody { - // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's - // Body as a NoOpCloser and will not be reset after read by the HTTP - // client reader. - r.ResetBody() - } - - r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody))) -} - -const logRespMsg = `DEBUG: Response %s/%s Details: ----[ RESPONSE ]-------------------------------------- -%s ------------------------------------------------------` - -const logRespErrMsg = `DEBUG ERROR: Response %s/%s: ----[ RESPONSE DUMP ERROR ]----------------------------- -%s ------------------------------------------------------` - -func logResponse(r *request.Request) { - var msg = "no response data" - if r.HTTPResponse != nil { - logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) - dumpedBody, err := httputil.DumpResponse(r.HTTPResponse, logBody) - if err != nil { - r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err)) - return - } - - msg = string(dumpedBody) - } else if r.Error != nil { - msg = r.Error.Error() - } - r.Config.Logger.Log(fmt.Sprintf(logRespMsg, r.ClientInfo.ServiceName, r.Operation.Name, msg)) + c.Handlers.Send.PushFrontNamed(request.NamedHandler{Name: "awssdk.client.LogRequest", Fn: logRequest}) + c.Handlers.Send.PushBackNamed(request.NamedHandler{Name: "awssdk.client.LogResponse", Fn: logResponse}) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go index 43a3676b..63d2df67 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go @@ -2,6 +2,7 @@ package client import ( "math/rand" + "strconv" "sync" "time" @@ -15,11 +16,11 @@ import ( // the MaxRetries method: // // type retryer struct { -// service.DefaultRetryer +// client.DefaultRetryer // } // // // This implementation always has 100 max retries -// func (d retryer) MaxRetries() uint { return 100 } +// func (d retryer) MaxRetries() int { return 100 } type DefaultRetryer struct { NumMaxRetries int } @@ -38,14 +39,18 @@ func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration { minTime := 30 throttle := d.shouldThrottle(r) if throttle { + if delay, ok := getRetryDelay(r); ok { + return delay + } + minTime = 500 } retryCount := r.RetryCount - if retryCount > 13 { - retryCount = 13 - } else if throttle && 
retryCount > 8 { + if throttle && retryCount > 8 { retryCount = 8 + } else if retryCount > 13 { + retryCount = 13 } delay := (1 << uint(retryCount)) * (seededRand.Intn(minTime) + minTime) @@ -54,6 +59,12 @@ func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration { // ShouldRetry returns true if the request should be retried. func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable != nil { + return *r.Retryable + } + if r.HTTPResponse.StatusCode >= 500 { return true } @@ -62,12 +73,49 @@ func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { // ShouldThrottle returns true if the request should be throttled. func (d DefaultRetryer) shouldThrottle(r *request.Request) bool { - if r.HTTPResponse.StatusCode == 502 || - r.HTTPResponse.StatusCode == 503 || - r.HTTPResponse.StatusCode == 504 { - return true + switch r.HTTPResponse.StatusCode { + case 429: + case 502: + case 503: + case 504: + default: + return r.IsErrorThrottle() } - return r.IsErrorThrottle() + + return true +} + +// This will look in the Retry-After header, RFC 7231, for how long +// it will wait before attempting another request +func getRetryDelay(r *request.Request) (time.Duration, bool) { + if !canUseRetryAfterHeader(r) { + return 0, false + } + + delayStr := r.HTTPResponse.Header.Get("Retry-After") + if len(delayStr) == 0 { + return 0, false + } + + delay, err := strconv.Atoi(delayStr) + if err != nil { + return 0, false + } + + return time.Duration(delay) * time.Second, true +} + +// Will look at the status code to see if the retry header pertains to +// the status code. +func canUseRetryAfterHeader(r *request.Request) bool { + switch r.HTTPResponse.StatusCode { + case 429: + case 503: + default: + return false + } + + return true } // lockedSource is a thread-safe implementation of rand.Source diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go new file mode 100644 index 00000000..1f39c91f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go @@ -0,0 +1,108 @@ +package client + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http/httputil" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +const logReqMsg = `DEBUG: Request %s/%s Details: +---[ REQUEST POST-SIGN ]----------------------------- +%s +-----------------------------------------------------` + +const logReqErrMsg = `DEBUG ERROR: Request %s/%s: +---[ REQUEST DUMP ERROR ]----------------------------- +%s +------------------------------------------------------` + +type logWriter struct { + // Logger is what we will use to log the payload of a response. + Logger aws.Logger + // buf stores the contents of what has been read + buf *bytes.Buffer +} + +func (logger *logWriter) Write(b []byte) (int, error) { + return logger.buf.Write(b) +} + +type teeReaderCloser struct { + // io.Reader will be a tee reader that is used during logging. + // This structure will read from a body and write the contents to a logger. + io.Reader + // Source is used just to close when we are done reading. 
+ Source io.ReadCloser +} + +func (reader *teeReaderCloser) Close() error { + return reader.Source.Close() +} + +func logRequest(r *request.Request) { + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + dumpedBody, err := httputil.DumpRequestOut(r.HTTPRequest, logBody) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + if logBody { + // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's + // Body as a NoOpCloser and will not be reset after read by the HTTP + // client reader. + r.ResetBody() + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody))) +} + +const logRespMsg = `DEBUG: Response %s/%s Details: +---[ RESPONSE ]-------------------------------------- +%s +-----------------------------------------------------` + +const logRespErrMsg = `DEBUG ERROR: Response %s/%s: +---[ RESPONSE DUMP ERROR ]----------------------------- +%s +-----------------------------------------------------` + +func logResponse(r *request.Request) { + lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)} + r.HTTPResponse.Body = &teeReaderCloser{ + Reader: io.TeeReader(r.HTTPResponse.Body, lw), + Source: r.HTTPResponse.Body, + } + + handlerFn := func(req *request.Request) { + body, err := httputil.DumpResponse(req.HTTPResponse, false) + if err != nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, req.ClientInfo.ServiceName, req.Operation.Name, err)) + return + } + + b, err := ioutil.ReadAll(lw.buf) + if err != nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, req.ClientInfo.ServiceName, req.Operation.Name, err)) + return + } + lw.Logger.Log(fmt.Sprintf(logRespMsg, req.ClientInfo.ServiceName, req.Operation.Name, string(body))) + if req.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) { + lw.Logger.Log(string(b)) + } + } + + const handlerName = "awsdk.client.LogResponse.ResponseBody" + + r.Handlers.Unmarshal.SetBackNamed(request.NamedHandler{ + Name: handlerName, Fn: handlerFn, + }) + r.Handlers.UnmarshalError.SetBackNamed(request.NamedHandler{ + Name: handlerName, Fn: handlerFn, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go index d58b8128..ae3a2869 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -22,9 +22,9 @@ type RequestRetryer interface{} // // // Create Session with MaxRetry configuration to be shared by multiple // // service clients. -// sess, err := session.NewSession(&aws.Config{ +// sess := session.Must(session.NewSession(&aws.Config{ // MaxRetries: aws.Int(3), -// }) +// })) // // // Create S3 service client with a specific Region. // svc := s3.New(sess, &aws.Config{ @@ -53,6 +53,13 @@ type Config struct { // to use based on region. EndpointResolver endpoints.Resolver + // EnforceShouldRetryCheck is used in the AfterRetryHandler to always call + // ShouldRetry regardless of whether or not if request.Retryable is set. + // This will utilize ShouldRetry method of custom retryers. If EnforceShouldRetryCheck + // is not set, then ShouldRetry will only be called if request.Retryable is nil. + // Proper handling of the request.Retryable field is important when setting this field. + EnforceShouldRetryCheck *bool + // The region to send requests to. This parameter is required and must // be configured globally or on a per-client basis unless otherwise // noted. 
A full list of regions is found in the "Regions and Endpoints" @@ -88,7 +95,7 @@ type Config struct { // recoverable failures. // // When nil or the value does not implement the request.Retryer interface, - // the request.DefaultRetryer will be used. + // the client.DefaultRetryer will be used. // // When both Retryer and MaxRetries are non-nil, the former is used and // the latter ignored. @@ -154,7 +161,8 @@ type Config struct { // the EC2Metadata overriding the timeout for default credentials chain. // // Example: - // sess, err := session.NewSession(aws.NewConfig().WithEC2MetadataDiableTimeoutOverride(true)) + // sess := session.Must(session.NewSession(aws.NewConfig() + // .WithEC2MetadataDiableTimeoutOverride(true))) // // svc := s3.New(sess) // @@ -174,7 +182,7 @@ type Config struct { // // Only supported with. // - // sess, err := session.NewSession() + // sess := session.Must(session.NewSession()) // // svc := s3.New(sess, &aws.Config{ // UseDualStack: aws.Bool(true), @@ -186,13 +194,19 @@ type Config struct { // request delays. This value should only be used for testing. To adjust // the delay of a request see the aws/client.DefaultRetryer and // aws/request.Retryer. + // + // SleepDelay will prevent any Context from being used for canceling retry + // delay of an API operation. It is recommended to not use SleepDelay at all + // and specify a Retryer instead. SleepDelay func(time.Duration) // DisableRestProtocolURICleaning will not clean the URL path when making rest protocol requests. // Will default to false. This would only be used for empty directory names in s3 requests. // // Example: - // sess, err := session.NewSession(&aws.Config{DisableRestProtocolURICleaning: aws.Bool(true)) + // sess := session.Must(session.NewSession(&aws.Config{ + // DisableRestProtocolURICleaning: aws.Bool(true), + // })) // // svc := s3.New(sess) // out, err := svc.GetObject(&s3.GetObjectInput { @@ -207,9 +221,9 @@ type Config struct { // // // Create Session with MaxRetry configuration to be shared by multiple // // service clients. -// sess, err := session.NewSession(aws.NewConfig(). +// sess := session.Must(session.NewSession(aws.NewConfig(). // WithMaxRetries(3), -// ) +// )) // // // Create S3 service client with a specific Region. // svc := s3.New(sess, aws.NewConfig(). @@ -436,6 +450,10 @@ func mergeInConfig(dst *Config, other *Config) { if other.DisableRestProtocolURICleaning != nil { dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning } + + if other.EnforceShouldRetryCheck != nil { + dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck + } } // Copy will return a shallow copy of the Config object. If any additional diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context.go b/vendor/github.com/aws/aws-sdk-go/aws/context.go new file mode 100644 index 00000000..79f42685 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context.go @@ -0,0 +1,71 @@ +package aws + +import ( + "time" +) + +// Context is an copy of the Go v1.7 stdlib's context.Context interface. +// It is represented as a SDK interface to enable you to use the "WithContext" +// API methods with Go v1.6 and a Context type such as golang.org/x/net/context. +// +// See https://golang.org/pkg/context on how to use contexts. +type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. 
+ Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key returns the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + Value(key interface{}) interface{} +} + +// BackgroundContext returns a context that will never be canceled, has no +// values, and no deadline. This context is used by the SDK to provide +// backwards compatibility with non-context API operations and functionality. +// +// Go 1.6 and before: +// This context function is equivalent to context.Background in the Go stdlib. +// +// Go 1.7 and later: +// The context returned will be the value returned by context.Background() +// +// See https://golang.org/pkg/context for more information on Contexts. +func BackgroundContext() Context { + return backgroundCtx +} + +// SleepWithContext will wait for the timer duration to expire, or the context +// is canceled. Which ever happens first. If the context is canceled the Context's +// error will be returned. +// +// Expects Context to always return a non-nil error if the Done channel is closed. +func SleepWithContext(ctx Context, dur time.Duration) error { + t := time.NewTimer(dur) + defer t.Stop() + + select { + case <-t.C: + break + case <-ctx.Done(): + return ctx.Err() + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go new file mode 100644 index 00000000..8fdda530 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go @@ -0,0 +1,41 @@ +// +build !go1.7 + +package aws + +import "time" + +// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to +// provide a 1.6 and 1.5 safe version of context that is compatible with Go +// 1.7's Context. +// +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. 
+type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case backgroundCtx: + return "aws.BackgroundContext" + } + return "unknown empty Context" +} + +var ( + backgroundCtx = new(emptyCtx) +) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go new file mode 100644 index 00000000..064f75c9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go @@ -0,0 +1,9 @@ +// +build go1.7 + +package aws + +import "context" + +var ( + backgroundCtx = context.Background() +) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go index 3b73a7da..ff5d58e0 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go @@ -311,6 +311,24 @@ func TimeValue(v *time.Time) time.Time { return time.Time{} } +// SecondsTimeValue converts an int64 pointer to a time.Time value +// representing seconds since Epoch or time.Time{} if the pointer is nil. +func SecondsTimeValue(v *int64) time.Time { + if v != nil { + return time.Unix((*v / 1000), 0) + } + return time.Time{} +} + +// MillisecondsTimeValue converts an int64 pointer to a time.Time value +// representing milliseconds sinch Epoch or time.Time{} if the pointer is nil. +func MillisecondsTimeValue(v *int64) time.Time { + if v != nil { + return time.Unix(0, (*v * 1000000)) + } + return time.Time{} +} + // TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC". // The result is undefined if the Unix time cannot be represented by an int64. // Which includes calling TimeUnixMilli on a zero Time is undefined. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go index 8a7bafc7..495e3ef6 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go @@ -27,7 +27,7 @@ type lener interface { // or will use the HTTPRequest.Header's "Content-Length" if defined. If unable // to determine request body length and no "Content-Length" was specified it will panic. // -// The Content-Length will only be aded to the request if the length of the body +// The Content-Length will only be added to the request if the length of the body // is greater than 0. If the body is empty or the current `Content-Length` // header is <= 0, the header will also be stripped. var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) { @@ -71,8 +71,8 @@ var reStatusCode = regexp.MustCompile(`^(\d{3})`) // ValidateReqSigHandler is a request handler to ensure that the request's // signature doesn't expire before it is sent. This can happen when a request -// is built and signed signficantly before it is sent. Or significant delays -// occur whne retrying requests that would cause the signature to expire. +// is built and signed significantly before it is sent. Or significant delays +// occur when retrying requests that would cause the signature to expire. 
var ValidateReqSigHandler = request.NamedHandler{ Name: "core.ValidateReqSigHandler", Fn: func(r *request.Request) { @@ -98,44 +98,95 @@ var ValidateReqSigHandler = request.NamedHandler{ } // SendHandler is a request handler to send service request using HTTP client. -var SendHandler = request.NamedHandler{Name: "core.SendHandler", Fn: func(r *request.Request) { - var err error - r.HTTPResponse, err = r.Config.HTTPClient.Do(r.HTTPRequest) - if err != nil { - // Prevent leaking if an HTTPResponse was returned. Clean up - // the body. - if r.HTTPResponse != nil { - r.HTTPResponse.Body.Close() +var SendHandler = request.NamedHandler{ + Name: "core.SendHandler", + Fn: func(r *request.Request) { + sender := sendFollowRedirects + if r.DisableFollowRedirects { + sender = sendWithoutFollowRedirects } - // Capture the case where url.Error is returned for error processing - // response. e.g. 301 without location header comes back as string - // error and r.HTTPResponse is nil. Other url redirect errors will - // comeback in a similar method. - if e, ok := err.(*url.Error); ok && e.Err != nil { - if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { - code, _ := strconv.ParseInt(s[1], 10, 64) - r.HTTPResponse = &http.Response{ - StatusCode: int(code), - Status: http.StatusText(int(code)), - Body: ioutil.NopCloser(bytes.NewReader([]byte{})), - } - return - } + + if request.NoBody == r.HTTPRequest.Body { + // Strip off the request body if the NoBody reader was used as a + // place holder for a request body. This prevents the SDK from + // making requests with a request body when it would be invalid + // to do so. + // + // Use a shallow copy of the http.Request to ensure the race condition + // of transport on Body will not trigger + reqOrig, reqCopy := r.HTTPRequest, *r.HTTPRequest + reqCopy.Body = nil + r.HTTPRequest = &reqCopy + defer func() { + r.HTTPRequest = reqOrig + }() } - if r.HTTPResponse == nil { - // Add a dummy request response object to ensure the HTTPResponse - // value is consistent. + + var err error + r.HTTPResponse, err = sender(r) + if err != nil { + handleSendError(r, err) + } + }, +} + +func sendFollowRedirects(r *request.Request) (*http.Response, error) { + return r.Config.HTTPClient.Do(r.HTTPRequest) +} + +func sendWithoutFollowRedirects(r *request.Request) (*http.Response, error) { + transport := r.Config.HTTPClient.Transport + if transport == nil { + transport = http.DefaultTransport + } + + return transport.RoundTrip(r.HTTPRequest) +} + +func handleSendError(r *request.Request, err error) { + // Prevent leaking if an HTTPResponse was returned. Clean up + // the body. + if r.HTTPResponse != nil { + r.HTTPResponse.Body.Close() + } + // Capture the case where url.Error is returned for error processing + // response. e.g. 301 without location header comes back as string + // error and r.HTTPResponse is nil. Other URL redirect errors will + // comeback in a similar method. + if e, ok := err.(*url.Error); ok && e.Err != nil { + if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { + code, _ := strconv.ParseInt(s[1], 10, 64) r.HTTPResponse = &http.Response{ - StatusCode: int(0), - Status: http.StatusText(int(0)), + StatusCode: int(code), + Status: http.StatusText(int(code)), Body: ioutil.NopCloser(bytes.NewReader([]byte{})), } + return } - // Catch all other request errors. 
- r.Error = awserr.New("RequestError", "send request failed", err) - r.Retryable = aws.Bool(true) // network errors are retryable } -}} + if r.HTTPResponse == nil { + // Add a dummy request response object to ensure the HTTPResponse + // value is consistent. + r.HTTPResponse = &http.Response{ + StatusCode: int(0), + Status: http.StatusText(int(0)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + } + // Catch all other request errors. + r.Error = awserr.New("RequestError", "send request failed", err) + r.Retryable = aws.Bool(true) // network errors are retryable + + // Override the error with a context canceled error, if that was canceled. + ctx := r.Context() + select { + case <-ctx.Done(): + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", ctx.Err()) + r.Retryable = aws.Bool(false) + default: + } +} // ValidateResponseHandler is a request handler to validate service response. var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) { @@ -150,13 +201,22 @@ var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseH var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) { // If one of the other handlers already set the retry state // we don't want to override it based on the service's state - if r.Retryable == nil { + if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { r.Retryable = aws.Bool(r.ShouldRetry(r)) } if r.WillRetry() { r.RetryDelay = r.RetryRules(r) - r.Config.SleepDelay(r.RetryDelay) + + if sleepFn := r.Config.SleepDelay; sleepFn != nil { + // Support SleepDelay for backwards compatibility and testing + sleepFn(r.RetryDelay) + } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", err) + r.Retryable = aws.Bool(false) + return + } // when the expired token exception occurs the credentials // need to be expired locally so that the next request to diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go index 6efc77bf..f298d659 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go @@ -13,7 +13,7 @@ var ( // // @readonly ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders", - `no valid providers in chain. Deprecated. + `no valid providers in chain. Deprecated. 
For verbose messaging see aws.Config.CredentialsChainVerboseErrors`,
		nil)
)
@@ -39,16 +39,18 @@ var (
 // does not return any credentials ChainProvider will return the error
 // ErrNoValidProvidersFoundInChain
 //
-//  creds := NewChainCredentials(
-//      []Provider{
-//          &EnvProvider{},
-//          &EC2RoleProvider{
+//  creds := credentials.NewChainCredentials(
+//      []credentials.Provider{
+//          &credentials.EnvProvider{},
+//          &ec2rolecreds.EC2RoleProvider{
 //              Client: ec2metadata.New(sess),
 //          },
 //      })
 //
 //  // Usage of ChainCredentials with aws.Config
-//  svc := ec2.New(&aws.Config{Credentials: creds})
+//  svc := ec2.New(session.Must(session.NewSession(&aws.Config{
+//      Credentials: creds,
+//  })))
 //
 type ChainProvider struct {
 	Providers []Provider
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
index 7b8ebf5f..42416fc2 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
@@ -14,7 +14,7 @@
 //
 // Example of using the environment variable credentials.
 //
-//     creds := NewEnvCredentials()
+//     creds := credentials.NewEnvCredentials()
 //
 //     // Retrieve the credentials value
 //     credValue, err := creds.Get()
@@ -26,7 +26,7 @@
 // This may be helpful to proactively expire credentials and refresh them sooner
 // than they would naturally expire on their own.
 //
-//     creds := NewCredentials(&EC2RoleProvider{})
+//     creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{})
 //     creds.Expire()
 //     credsValue, err := creds.Get()
 //     // New credentials will be retrieved instead of from cache.
@@ -43,7 +43,7 @@
 //     func (m *MyProvider) Retrieve() (Value, error) {...}
 //     func (m *MyProvider) IsExpired() bool {...}
 //
-//     creds := NewCredentials(&MyProvider{})
+//     creds := credentials.NewCredentials(&MyProvider{})
 //     credValue, err := creds.Get()
 //
 package credentials
@@ -60,7 +60,9 @@ import (
 // when making service API calls. For example, when accessing public
 // s3 buckets.
 //
-//     svc := s3.New(&aws.Config{Credentials: AnonymousCredentials})
+//     svc := s3.New(session.Must(session.NewSession(&aws.Config{
+//       Credentials: credentials.AnonymousCredentials,
+//     })))
 //     // Access public S3 buckets.
 //
 // @readonly
@@ -88,7 +90,7 @@ type Value struct {
 // The Provider should not need to implement its own mutexes, because
 // that will be managed by Credentials.
 type Provider interface {
-	// Refresh returns nil if it successfully retrieved the value.
+	// Retrieve returns nil if it successfully retrieved the value.
 	// Error is returned if the value were not obtainable, or empty.
 	Retrieve() (Value, error)

@@ -97,6 +99,27 @@ type Provider interface {
 	IsExpired() bool
 }

+// An ErrorProvider is a stub credentials provider that always returns an
+// error. It is used by the SDK when constructing a known provider is not
+// possible due to an error.
+type ErrorProvider struct {
+	// The error to be returned from Retrieve
+	Err error
+
+	// The provider name to set on the Value returned by Retrieve
+	ProviderName string
+}
+
+// Retrieve will always return the error that the ErrorProvider was created with.
+func (p ErrorProvider) Retrieve() (Value, error) {
+	return Value{ProviderName: p.ProviderName}, p.Err
+}
+
+// IsExpired will always return not expired.
+func (p ErrorProvider) IsExpired() bool {
+	return false
+}
+
 // A Expiry provides shared expiration logic to be used by credentials
 // providers to implement expiry functionality.
 //
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
index 96655bc4..c14231a1 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
@@ -29,6 +29,7 @@ var (
 // Environment variables used:
 //
 // * Access Key ID:     AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
+//
 // * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
 type EnvProvider struct {
 	retrieved bool
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
index 7fb7cbf0..51e21e0f 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
@@ -3,11 +3,11 @@ package credentials
 import (
 	"fmt"
 	"os"
-	"path/filepath"

 	"github.com/go-ini/ini"

 	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/internal/shareddefaults"
 )

 // SharedCredsProviderName provides a name of SharedCreds provider
@@ -15,8 +15,6 @@ const SharedCredsProviderName = "SharedCredentialsProvider"

 var (
 	// ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found.
-	//
-	// @readonly
 	ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil)
 )

@@ -117,22 +115,23 @@ func loadProfile(filename, profile string) (Value, error) {
 //
 // Will return an error if the user's home directory path cannot be found.
 func (p *SharedCredentialsProvider) filename() (string, error) {
-	if p.Filename == "" {
-		if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); p.Filename != "" {
-			return p.Filename, nil
-		}
-
-		homeDir := os.Getenv("HOME") // *nix
-		if homeDir == "" { // Windows
-			homeDir = os.Getenv("USERPROFILE")
-		}
-		if homeDir == "" {
-			return "", ErrSharedCredentialsHomeNotFound
-		}
-
-		p.Filename = filepath.Join(homeDir, ".aws", "credentials")
+	if len(p.Filename) != 0 {
+		return p.Filename, nil
 	}

+	if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(p.Filename) != 0 {
+		return p.Filename, nil
+	}
+
+	if home := shareddefaults.UserHomeDir(); len(home) == 0 {
+		// Backwards compatibility of the home directory not found error being
+		// returned. This error is too verbose; failure when opening the file
+		// would have been a better error to return.
+		return "", ErrSharedCredentialsHomeNotFound
+	}
+
+	p.Filename = shareddefaults.SharedCredentialsFilename()
+
 	return p.Filename, nil
 }
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
index 30c847ae..4108e433 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
@@ -1,7 +1,81 @@
-// Package stscreds are credential Providers to retrieve STS AWS credentials.
-//
-// STS provides multiple ways to retrieve credentials which can be used when making
-// future AWS service API operation calls.
+/*
+Package stscreds are credential Providers to retrieve STS AWS credentials.
+
+STS provides multiple ways to retrieve credentials which can be used when making
+future AWS service API operation calls.
+
+The SDK will ensure that per instance of credentials.Credentials all requests
+to refresh the credentials will be synchronized. But, the SDK is unable to
+ensure synchronous usage of the AssumeRoleProvider if the value is shared
+between multiple Credentials, Sessions or service clients.
+
+Assume Role
+
+To assume an IAM role using STS with the SDK you can create a new Credentials
+with the SDK's stscreds package.
+
+	// Initial credentials loaded from SDK's default credential chain. Such as
+	// the environment, shared credentials (~/.aws/credentials), or EC2 Instance
+	// Role. These credentials will be used to make the STS Assume Role API call.
+	sess := session.Must(session.NewSession())
+
+	// Create the credentials from AssumeRoleProvider to assume the role
+	// referenced by the "myRoleARN" ARN.
+	creds := stscreds.NewCredentials(sess, "myRoleArn")
+
+	// Create service client value configured for credentials
+	// from assumed role.
+	svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+Assume Role with static MFA Token
+
+To assume an IAM role with an MFA token you can either specify an MFA token
+code directly or provide a function to prompt the user each time the role's
+credentials need to be refreshed. Specifying the TokenCode should be used
+for short lived operations that will not need to be refreshed, and when you
+do not want the user to be prompted for an MFA token on each refresh.
+
+With TokenCode the AssumeRoleProvider will not be able to refresh the role's
+credentials.
+
+	// Create the credentials from AssumeRoleProvider to assume the role
+	// referenced by the "myRoleARN" ARN using the MFA token code provided.
+	creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
+		p.SerialNumber = aws.String("myTokenSerialNumber")
+		p.TokenCode = aws.String("00000000")
+	})
+
+	// Create service client value configured for credentials
+	// from assumed role.
+	svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+Assume Role with MFA Token Provider
+
+To assume an IAM role with MFA for longer running tasks where the credentials
+may need to be refreshed, setting the TokenProvider field of AssumeRoleProvider
+will allow the credential provider to prompt for a new MFA token code when the
+role's credentials need to be refreshed.
+
+The StdinTokenProvider function is available to prompt on stdin to retrieve
+the MFA token code from the user. You can also implement custom prompts by
+satisfying the TokenProvider function signature.
+
+Using StdinTokenProvider with multiple AssumeRoleProviders or Credentials will
+have undesirable results as the StdinTokenProvider will not be synchronized. A
+single Credentials with an AssumeRoleProvider can be shared safely.
+
+	// Create the credentials from AssumeRoleProvider to assume the role
+	// referenced by the "myRoleARN" ARN. Prompting for MFA token from stdin.
+	creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
+		p.SerialNumber = aws.String("myTokenSerialNumber")
+		p.TokenProvider = stscreds.StdinTokenProvider
+	})
+
+	// Create service client value configured for credentials
+	// from assumed role.
+	svc := s3.New(sess, &aws.Config{Credentials: creds})
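+
+A custom TokenProvider is any value satisfying the func() (string, error)
+signature. As an illustrative sketch only (the environment variable name
+below is made up, not part of the SDK), the token code could be supplied
+without prompting:
+
+	// Read the MFA token code from an environment variable instead of
+	// prompting on stdin. Returning an error aborts the credential refresh.
+	creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
+		p.SerialNumber = aws.String("myTokenSerialNumber")
+		p.TokenProvider = func() (string, error) {
+			return os.Getenv("MY_MFA_TOKEN_CODE"), nil
+		}
+	})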
+
+*/
 package stscreds

 import (
@@ -9,11 +83,31 @@ import (
 	"time"

 	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/aws/client"
 	"github.com/aws/aws-sdk-go/aws/credentials"
 	"github.com/aws/aws-sdk-go/service/sts"
 )

+// StdinTokenProvider will prompt on stdout and read a string value from stdin.
+// An error is returned if reading from stdin fails.
+//
+// Use this function to read MFA tokens from stdin. The function makes no attempt
+// to make atomic prompts from stdin across multiple goroutines.
+//
+// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+// have undesirable results as the StdinTokenProvider will not be synchronized. A
+// single Credentials with an AssumeRoleProvider can be shared safely.
+//
+// Will wait forever until something is provided on stdin.
+func StdinTokenProvider() (string, error) {
+	var v string
+	fmt.Printf("Assume Role MFA token code: ")
+	_, err := fmt.Scanln(&v)
+
+	return v, err
+}
+
 // ProviderName provides a name of AssumeRole provider
 const ProviderName = "AssumeRoleProvider"

@@ -27,8 +121,15 @@ type AssumeRoler interface {
 var DefaultDuration = time.Duration(15) * time.Minute

 // AssumeRoleProvider retrieves temporary credentials from the STS service, and
-// keeps track of their expiration time. This provider must be used explicitly,
-// as it is not included in the credentials chain.
+// keeps track of their expiration time.
+//
+// This credential provider will be used by the SDK's default credential chain
+// when shared configuration is enabled, and the shared config or shared credentials
+// file configures an assume role. See Session docs for how to do this.
+//
+// AssumeRoleProvider does not provide any synchronization and it is not safe
+// to share this value across multiple Credentials, Sessions, or service clients
+// without also sharing the same Credentials instance.
 type AssumeRoleProvider struct {
 	credentials.Expiry

@@ -65,8 +166,23 @@ type AssumeRoleProvider struct {
 	// assumed requires MFA (that is, if the policy includes a condition that tests
 	// for MFA). If the role being assumed requires MFA and if the TokenCode value
 	// is missing or expired, the AssumeRole call returns an "access denied" error.
+	//
+	// If SerialNumber is set and neither TokenCode nor TokenProvider is also
+	// set, an error will be returned.
 	TokenCode *string

+	// Async method of providing MFA token code for assuming an IAM role with MFA.
+	// The value returned by the function will be used as the TokenCode in the Retrieve
+	// call. See StdinTokenProvider for a provider that prompts and reads from stdin.
+	//
+	// This token provider will be called whenever the assumed role's
+	// credentials need to be refreshed, when SerialNumber is also set and
+	// TokenCode is not set.
+	//
+	// If both TokenCode and TokenProvider are set, TokenProvider will be used and
+	// TokenCode is ignored.
+	TokenProvider func() (string, error)
+
 	// ExpiryWindow will allow the credentials to trigger refreshing prior to
 	// the credentials actually expiring. This is beneficial so race conditions
 	// with expiring credentials do not cause request to fail unexpectedly
@@ -85,6 +201,10 @@ type AssumeRoleProvider struct {
 //
 // Takes a Config provider to create the STS client. The ConfigProvider is
 // satisfied by the session.Session type.
+//
+// It is safe to share the returned Credentials with multiple Sessions and
+// service clients.
All access to the credentials and refreshing them +// will be synchronized. func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { p := &AssumeRoleProvider{ Client: sts.New(c), @@ -103,7 +223,11 @@ func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*As // AssumeRoleProvider. The credentials will expire every 15 minutes and the // role will be named after a nanosecond timestamp of this operation. // -// Takes an AssumeRoler which can be satisfiede by the STS client. +// Takes an AssumeRoler which can be satisfied by the STS client. +// +// It is safe to share the returned Credentials with multiple Sessions and +// service clients. All access to the credentials and refreshing them +// will be synchronized. func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { p := &AssumeRoleProvider{ Client: svc, @@ -139,12 +263,25 @@ func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { if p.Policy != nil { input.Policy = p.Policy } - if p.SerialNumber != nil && p.TokenCode != nil { - input.SerialNumber = p.SerialNumber - input.TokenCode = p.TokenCode + if p.SerialNumber != nil { + if p.TokenCode != nil { + input.SerialNumber = p.SerialNumber + input.TokenCode = p.TokenCode + } else if p.TokenProvider != nil { + input.SerialNumber = p.SerialNumber + code, err := p.TokenProvider() + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + input.TokenCode = aws.String(code) + } else { + return credentials.Value{ProviderName: ProviderName}, + awserr.New("AssumeRoleTokenNotAvailable", + "assume role with MFA enabled, but neither TokenCode nor TokenProvider are set", nil) + } } - roleOutput, err := p.Client.AssumeRole(input) + roleOutput, err := p.Client.AssumeRole(input) if err != nil { return credentials.Value{ProviderName: ProviderName}, err } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go index 0ef55040..07afe3b8 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go @@ -10,10 +10,12 @@ package defaults import ( "fmt" "net/http" + "net/url" "os" "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/corehandlers" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" @@ -56,7 +58,6 @@ func Config() *aws.Config { WithMaxRetries(aws.UseServiceDefaultRetries). WithLogger(aws.NewDefaultLogger()). WithLogLevel(aws.LogOff). - WithSleepDelay(time.Sleep). WithEndpointResolver(endpoints.DefaultResolver()) } @@ -97,23 +98,51 @@ func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credenti }) } -// RemoteCredProvider returns a credenitials provider for the default remote +const ( + httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI" + ecsCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" +) + +// RemoteCredProvider returns a credentials provider for the default remote // endpoints such as EC2 or ECS Roles. 
func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { - ecsCredURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI") + if u := os.Getenv(httpProviderEnvVar); len(u) > 0 { + return localHTTPCredProvider(cfg, handlers, u) + } - if len(ecsCredURI) > 0 { - return ecsCredProvider(cfg, handlers, ecsCredURI) + if uri := os.Getenv(ecsCredsProviderEnvVar); len(uri) > 0 { + u := fmt.Sprintf("http://169.254.170.2%s", uri) + return httpCredProvider(cfg, handlers, u) } return ec2RoleProvider(cfg, handlers) } -func ecsCredProvider(cfg aws.Config, handlers request.Handlers, uri string) credentials.Provider { - const host = `169.254.170.2` +func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { + var errMsg string - return endpointcreds.NewProviderClient(cfg, handlers, - fmt.Sprintf("http://%s%s", host, uri), + parsed, err := url.Parse(u) + if err != nil { + errMsg = fmt.Sprintf("invalid URL, %v", err) + } else if host := aws.URLHostname(parsed); !(host == "localhost" || host == "127.0.0.1") { + errMsg = fmt.Sprintf("invalid host address, %q, only localhost and 127.0.0.1 are valid.", host) + } + + if len(errMsg) > 0 { + if cfg.Logger != nil { + cfg.Logger.Log("Ignoring, HTTP credential provider", errMsg, err) + } + return credentials.ErrorProvider{ + Err: awserr.New("CredentialsEndpointError", errMsg, err), + ProviderName: endpointcreds.ProviderName, + } + } + + return httpCredProvider(cfg, handlers, u) +} + +func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { + return endpointcreds.NewProviderClient(cfg, handlers, u, func(p *endpointcreds.Provider) { p.ExpiryWindow = 5 * time.Minute }, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go new file mode 100644 index 00000000..ca0ee1dc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go @@ -0,0 +1,27 @@ +package defaults + +import ( + "github.com/aws/aws-sdk-go/internal/shareddefaults" +) + +// SharedCredentialsFilename returns the SDK's default file path +// for the shared credentials file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/credentials +// - Windows: %USERPROFILE%\.aws\credentials +func SharedCredentialsFilename() string { + return shareddefaults.SharedCredentialsFilename() +} + +// SharedConfigFilename returns the SDK's default file path for +// the shared config file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/config +// - Windows: %USERPROFILE%\.aws\config +func SharedConfigFilename() string { + return shareddefaults.SharedConfigFilename() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/doc.go new file mode 100644 index 00000000..4fcb6161 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/doc.go @@ -0,0 +1,56 @@ +// Package aws provides the core SDK's utilities and shared types. Use this package's +// utilities to simplify setting and reading API operations parameters. +// +// Value and Pointer Conversion Utilities +// +// This package includes a helper conversion utility for each scalar type the SDK's +// API use. These utilities make getting a pointer of the scalar, and dereferencing +// a pointer easier. +// +// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value. 
+// The Pointer to value will safely dereference the pointer and return its value.
+// If the pointer was nil, the scalar's zero value will be returned.
+//
+// The value to pointer functions will be named after the scalar type. So to get
+// a *string from a string value, use the "String" function. This makes it easy
+// to get a pointer to a literal string value, because getting the address of a
+// literal requires assigning the value to a variable first.
+//
+//    var strPtr *string
+//
+//    // Without the SDK's conversion functions
+//    str := "my string"
+//    strPtr = &str
+//
+//    // With the SDK's conversion functions
+//    strPtr = aws.String("my string")
+//
+//    // Convert *string to string value
+//    str = aws.StringValue(strPtr)
+//
+// In addition to scalars, the aws package also includes conversion utilities
+// for map and slice types commonly used in API parameters. The map and slice
+// conversion functions use a similar naming pattern as the scalar conversion
+// functions.
+//
+//    var strPtrs []*string
+//    var strs []string = []string{"Go", "Gophers", "Go"}
+//
+//    // Convert []string to []*string
+//    strPtrs = aws.StringSlice(strs)
+//
+//    // Convert []*string to []string
+//    strs = aws.StringValueSlice(strPtrs)
+//
+// SDK Default HTTP Client
+//
+// The SDK will use the http.DefaultClient if an HTTP client is not provided to
+// the SDK's Session or service client constructor. This means that if the
+// http.DefaultClient is modified by other components of your application the
+// modifications will be picked up by the SDK as well.
+//
+// In some cases this might be intended, but it is a better practice to create
+// a custom HTTP Client to share explicitly through your application. You can
+// configure the SDK to use the custom HTTP Client by setting the HTTPClient
+// value of the SDK's Config type when creating a Session or service client.
+package aws
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
index aa29a407..fff35fc5 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
@@ -1,4 +1,4 @@
-// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT.

 package endpoints

@@ -44,28 +44,33 @@ const (

 // Service identifiers
 const (
 	AcmServiceID = "acm" // Acm.
+	ApiPricingServiceID = "api.pricing" // ApiPricing.
 	ApigatewayServiceID = "apigateway" // Apigateway.
 	ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling.
-	AppstreamServiceID = "appstream" // Appstream.
 	Appstream2ServiceID = "appstream2" // Appstream2.
+	AthenaServiceID = "athena" // Athena.
 	AutoscalingServiceID = "autoscaling" // Autoscaling.
 	BatchServiceID = "batch" // Batch.
 	BudgetsServiceID = "budgets" // Budgets.
+	ClouddirectoryServiceID = "clouddirectory" // Clouddirectory.
 	CloudformationServiceID = "cloudformation" // Cloudformation.
 	CloudfrontServiceID = "cloudfront" // Cloudfront.
 	CloudhsmServiceID = "cloudhsm" // Cloudhsm.
+	Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2.
 	CloudsearchServiceID = "cloudsearch" // Cloudsearch.
 	CloudtrailServiceID = "cloudtrail" // Cloudtrail.
 	CodebuildServiceID = "codebuild" // Codebuild.
 	CodecommitServiceID = "codecommit" // Codecommit.
 	CodedeployServiceID = "codedeploy" // Codedeploy.
 	CodepipelineServiceID = "codepipeline" // Codepipeline.
+	CodestarServiceID = "codestar" // Codestar.
CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity. CognitoIdpServiceID = "cognito-idp" // CognitoIdp. CognitoSyncServiceID = "cognito-sync" // CognitoSync. ConfigServiceID = "config" // Config. CurServiceID = "cur" // Cur. DatapipelineServiceID = "datapipeline" // Datapipeline. + DaxServiceID = "dax" // Dax. DevicefarmServiceID = "devicefarm" // Devicefarm. DirectconnectServiceID = "directconnect" // Directconnect. DiscoveryServiceID = "discovery" // Discovery. @@ -83,11 +88,14 @@ const ( ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce. ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder. EmailServiceID = "email" // Email. + EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace. EsServiceID = "es" // Es. EventsServiceID = "events" // Events. FirehoseServiceID = "firehose" // Firehose. GameliftServiceID = "gamelift" // Gamelift. GlacierServiceID = "glacier" // Glacier. + GlueServiceID = "glue" // Glue. + GreengrassServiceID = "greengrass" // Greengrass. HealthServiceID = "health" // Health. IamServiceID = "iam" // Iam. ImportexportServiceID = "importexport" // Importexport. @@ -102,10 +110,14 @@ const ( MachinelearningServiceID = "machinelearning" // Machinelearning. MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics. MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace. + MghServiceID = "mgh" // Mgh. MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics. + ModelsLexServiceID = "models.lex" // ModelsLex. MonitoringServiceID = "monitoring" // Monitoring. + MturkRequesterServiceID = "mturk-requester" // MturkRequester. OpsworksServiceID = "opsworks" // Opsworks. OpsworksCmServiceID = "opsworks-cm" // OpsworksCm. + OrganizationsServiceID = "organizations" // Organizations. PinpointServiceID = "pinpoint" // Pinpoint. PollyServiceID = "polly" // Polly. RdsServiceID = "rds" // Rds. @@ -113,6 +125,7 @@ const ( RekognitionServiceID = "rekognition" // Rekognition. Route53ServiceID = "route53" // Route53. Route53domainsServiceID = "route53domains" // Route53domains. + RuntimeLexServiceID = "runtime.lex" // RuntimeLex. S3ServiceID = "s3" // S3. SdbServiceID = "sdb" // Sdb. ServicecatalogServiceID = "servicecatalog" // Servicecatalog. @@ -128,8 +141,10 @@ const ( StsServiceID = "sts" // Sts. SupportServiceID = "support" // Support. SwfServiceID = "swf" // Swf. + TaggingServiceID = "tagging" // Tagging. WafServiceID = "waf" // Waf. WafRegionalServiceID = "waf-regional" // WafRegional. + WorkdocsServiceID = "workdocs" // Workdocs. WorkspacesServiceID = "workspaces" // Workspaces. XrayServiceID = "xray" // Xray. ) @@ -137,17 +152,20 @@ const ( // DefaultResolver returns an Endpoint resolver that will be able // to resolve endpoints for: AWS Standard, AWS China, and AWS GovCloud (US). // -// Casting the return value of this func to a EnumPartitions will -// allow you to get a list of the partitions in the order the endpoints -// will be resolved in. +// Use DefaultPartitions() to get the list of the default partitions. +func DefaultResolver() Resolver { + return defaultPartitions +} + +// DefaultPartitions returns a list of the partitions the SDK is bundled +// with. The available partitions are: AWS Standard, AWS China, and AWS GovCloud (US). 
// -// resolver := endpoints.DefaultResolver() -// partitions := resolver.(endpoints.EnumPartitions).Partitions() +// partitions := endpoints.DefaultPartitions // for _, p := range partitions { // // ... inspect partitions // } -func DefaultResolver() Resolver { - return defaultPartitions +func DefaultPartitions() []Partition { + return defaultPartitions.Partitions() } var defaultPartitions = partitions{ @@ -240,15 +258,30 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "api.pricing": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "pricing", + }, + }, + Endpoints: endpoints{ + "ap-south-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, "apigateway": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -280,13 +313,6 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "appstream": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "us-east-1": endpoint{}, - }, - }, "appstream2": service{ Defaults: endpoint{ Protocols: []string{"https"}, @@ -301,6 +327,19 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "athena": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "autoscaling": service{ Defaults: endpoint{ Protocols: []string{"http", "https"}, @@ -325,7 +364,15 @@ var awsPartition = partition{ "batch": service{ Endpoints: endpoints{ - "us-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "budgets": service{ @@ -341,6 +388,18 @@ var awsPartition = partition{ }, }, }, + "clouddirectory": service{ + + Endpoints: endpoints{ + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "cloudformation": service{ Endpoints: endpoints{ @@ -389,6 +448,22 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "cloudhsmv2": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "cloudsearch": service{ Endpoints: endpoints{ @@ -426,18 +501,36 @@ var awsPartition = partition{ "codebuild": service{ Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": 
endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "codecommit": service{ Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "codedeploy": service{ @@ -463,13 +556,32 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codestar": service{ + + Endpoints: endpoints{ "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, - "sa-east-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -478,9 +590,12 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, @@ -491,9 +606,12 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, @@ -504,9 +622,12 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, @@ -547,6 +668,18 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "dax": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "devicefarm": service{ Endpoints: endpoints{ @@ -601,11 +734,17 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -673,6 +812,7 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": 
endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, @@ -689,6 +829,7 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, @@ -742,15 +883,17 @@ var awsPartition = partition{ "elasticfilesystem": service{ Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "elasticloadbalancing": service{ Defaults: endpoint{ - Protocols: []string{"http", "https"}, + Protocols: []string{"https"}, }, Endpoints: endpoints{ "ap-northeast-1": endpoint{}, @@ -816,6 +959,16 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "entitlement.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, "es": service{ Endpoints: endpoints{ @@ -824,8 +977,10 @@ var awsPartition = partition{ "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -841,8 +996,10 @@ var awsPartition = partition{ "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -853,9 +1010,12 @@ var awsPartition = partition{ "firehose": service{ Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "ap-northeast-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "gamelift": service{ @@ -865,10 +1025,15 @@ var awsPartition = partition{ "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -880,6 +1045,7 @@ var awsPartition = partition{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, @@ -891,6 +1057,27 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "glue": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "health": service{ Endpoints: endpoints{ @@ -934,6 +1121,7 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "eu-west-1": endpoint{}, 
"us-east-1": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -950,6 +1138,7 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, @@ -1006,10 +1195,14 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -1019,7 +1212,16 @@ var awsPartition = partition{ "lightsail": service{ Endpoints: endpoints{ - "us-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "logs": service{ @@ -1062,21 +1264,43 @@ var awsPartition = partition{ }, Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, + "mgh": service{ + + Endpoints: endpoints{ + "us-west-2": endpoint{}, + }, + }, "mobileanalytics": service{ Endpoints: endpoints{ "us-east-1": endpoint{}, }, }, + "models.lex": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, "monitoring": service{ Defaults: endpoint{ Protocols: []string{"http", "https"}, @@ -1098,6 +1322,16 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "mturk-requester": service{ + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "sandbox": endpoint{ + Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com", + }, + "us-east-1": endpoint{}, + }, + }, "opsworks": service{ Endpoints: endpoints{ @@ -1124,6 +1358,19 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "organizations": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "organizations.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, "pinpoint": service{ Defaults: endpoint{ CredentialScope: credentialScope{ @@ -1137,10 +1384,20 @@ var awsPartition = partition{ "polly": service{ Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "rds": service{ @@ -1210,6 +1467,17 @@ var awsPartition = partition{ "us-east-1": 
endpoint{}, }, }, + "runtime.lex": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, "s3": service{ PartitionEndpoint: "us-east-1", IsRegionalized: boxedTrue, @@ -1222,23 +1490,23 @@ var awsPartition = partition{ }, Endpoints: endpoints{ "ap-northeast-1": endpoint{ - Hostname: "s3-ap-northeast-1.amazonaws.com", + Hostname: "s3.ap-northeast-1.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, }, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{ - Hostname: "s3-ap-southeast-1.amazonaws.com", + Hostname: "s3.ap-southeast-1.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, }, "ap-southeast-2": endpoint{ - Hostname: "s3-ap-southeast-2.amazonaws.com", + Hostname: "s3.ap-southeast-2.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, }, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{ - Hostname: "s3-eu-west-1.amazonaws.com", + Hostname: "s3.eu-west-1.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, }, "eu-west-2": endpoint{}, @@ -1250,7 +1518,7 @@ var awsPartition = partition{ }, }, "sa-east-1": endpoint{ - Hostname: "s3-sa-east-1.amazonaws.com", + Hostname: "s3.sa-east-1.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, }, "us-east-1": endpoint{ @@ -1259,11 +1527,11 @@ var awsPartition = partition{ }, "us-east-2": endpoint{}, "us-west-1": endpoint{ - Hostname: "s3-us-west-1.amazonaws.com", + Hostname: "s3.us-west-1.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, }, "us-west-2": endpoint{ - Hostname: "s3-us-west-2.amazonaws.com", + Hostname: "s3.us-west-2.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, }, }, @@ -1290,14 +1558,18 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -1314,20 +1586,27 @@ var awsPartition = partition{ "sms": service{ Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "snowball": service{ Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -1384,10 +1663,13 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1399,7 +1681,10 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + 
"eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, @@ -1410,6 +1695,7 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, @@ -1425,7 +1711,7 @@ var awsPartition = partition{ }, "streams.dynamodb": service{ Defaults: endpoint{ - Protocols: []string{"http", "http", "https", "https"}, + Protocols: []string{"http", "https"}, CredentialScope: credentialScope{ Service: "dynamodb", }, @@ -1480,9 +1766,33 @@ var awsPartition = partition{ "eu-west-2": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "sts-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "sts-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "sts-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "sts-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, "support": service{ @@ -1510,6 +1820,25 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "tagging": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "waf": service{ PartitionEndpoint: "aws-global", IsRegionalized: boxedFalse, @@ -1529,6 +1858,18 @@ var awsPartition = partition{ "ap-northeast-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "workdocs": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -1540,6 +1881,7 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-west-2": endpoint{}, }, @@ -1552,8 +1894,10 @@ var awsPartition = partition{ "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1590,6 +1934,24 @@ var awscnPartition = partition{ }, }, Services: services{ + "apigateway": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, + }, + Endpoints: endpoints{ + "cn-north-1": 
endpoint{}, + }, + }, "autoscaling": service{ Defaults: endpoint{ Protocols: []string{"http", "https"}, @@ -1610,6 +1972,18 @@ var awscnPartition = partition{ "cn-north-1": endpoint{}, }, }, + "codedeploy": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "cognito-identity": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, "config": service{ Endpoints: endpoints{ @@ -1649,6 +2023,18 @@ var awscnPartition = partition{ }, }, }, + "ecr": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, "elasticache": service{ Endpoints: endpoints{ @@ -1663,7 +2049,7 @@ var awscnPartition = partition{ }, "elasticloadbalancing": service{ Defaults: endpoint{ - Protocols: []string{"http", "https"}, + Protocols: []string{"https"}, }, Endpoints: endpoints{ "cn-north-1": endpoint{}, @@ -1677,6 +2063,7 @@ var awscnPartition = partition{ "cn-north-1": endpoint{}, }, }, + "es": service{}, "events": service{ Endpoints: endpoints{ @@ -1704,12 +2091,28 @@ var awscnPartition = partition{ }, }, }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, "kinesis": service{ Endpoints: endpoints{ "cn-north-1": endpoint{}, }, }, + "lambda": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, "logs": service{ Endpoints: endpoints{ @@ -1745,6 +2148,12 @@ var awscnPartition = partition{ "cn-north-1": endpoint{}, }, }, + "snowball": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, "sns": service{ Defaults: endpoint{ Protocols: []string{"http", "https"}, @@ -1762,6 +2171,12 @@ var awscnPartition = partition{ "cn-north-1": endpoint{}, }, }, + "ssm": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, "storagegateway": service{ Endpoints: endpoints{ @@ -1770,7 +2185,7 @@ var awscnPartition = partition{ }, "streams.dynamodb": service{ Defaults: endpoint{ - Protocols: []string{"http", "http", "https", "https"}, + Protocols: []string{"http", "https"}, CredentialScope: credentialScope{ Service: "dynamodb", }, @@ -1787,6 +2202,12 @@ var awscnPartition = partition{ }, "swf": service{ + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "tagging": service{ + Endpoints: endpoints{ "cn-north-1": endpoint{}, }, @@ -1820,6 +2241,18 @@ var awsusgovPartition = partition{ }, }, Services: services{ + "acm": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "autoscaling": service{ Endpoints: endpoints{ @@ -1846,6 +2279,12 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "codedeploy": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "config": service{ Endpoints: endpoints{ @@ -1858,10 +2297,22 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "dms": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "dynamodb": service{ Endpoints: endpoints{ "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "ec2": service{ @@ -1887,6 +2338,12 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "elasticbeanstalk": service{ + + Endpoints: 
endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "elasticloadbalancing": service{ Endpoints: endpoints{ @@ -1903,6 +2360,12 @@ var awsusgovPartition = partition{ }, }, }, + "events": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "glacier": service{ Endpoints: endpoints{ @@ -1924,12 +2387,24 @@ var awsusgovPartition = partition{ }, }, }, + "kinesis": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "kms": service{ Endpoints: endpoints{ "us-gov-west-1": endpoint{}, }, }, + "lambda": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "logs": service{ Endpoints: endpoints{ @@ -1954,6 +2429,12 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "rekognition": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "s3": service{ Defaults: endpoint{ SignatureVersions: []string{"s3", "s3v4"}, @@ -1966,11 +2447,17 @@ var awsusgovPartition = partition{ }, }, "us-gov-west-1": endpoint{ - Hostname: "s3-us-gov-west-1.amazonaws.com", + Hostname: "s3.us-gov-west-1.amazonaws.com", Protocols: []string{"http", "https"}, }, }, }, + "sms": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "snowball": service{ Endpoints: endpoints{ @@ -1994,6 +2481,12 @@ var awsusgovPartition = partition{ }, }, }, + "ssm": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "streams.dynamodb": service{ Defaults: endpoint{ CredentialScope: credentialScope{ @@ -2002,6 +2495,12 @@ var awsusgovPartition = partition{ }, Endpoints: endpoints{ "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "sts": service{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go index a0e9bc45..84316b92 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go @@ -21,12 +21,12 @@ // partitions := resolver.(endpoints.EnumPartitions).Partitions() // // for _, p := range partitions { -// fmt.Println("Regions for", p.Name) +// fmt.Println("Regions for", p.ID()) // for id, _ := range p.Regions() { // fmt.Println("*", id) // } // -// fmt.Println("Services for", p.Name) +// fmt.Println("Services for", p.ID()) // for id, _ := range p.Services() { // fmt.Println("*", id) // } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go index 3adec131..9c3eedb4 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go @@ -27,6 +27,25 @@ type Options struct { // error will be returned. This option will prevent returning endpoints // that look valid, but may not resolve to any real endpoint. StrictMatching bool + + // Enables resolving a service endpoint based on the region provided if the + // service does not exist. The service endpoint ID will be used as the service + // domain name prefix. By default the endpoint resolver requires the service + // to be known when resolving endpoints. + // + // If resolving an endpoint on the partition list the provided region will + // be used to determine which partition's domain name pattern to the service + // endpoint ID with. 
+// If both the service and region are unknown and resolving the endpoint
+	// on the partition list, an UnknownEndpointError error will be returned.
+	//
+	// If resolving an endpoint on a partition specific resolver, that partition's
+	// domain name pattern will be used with the service endpoint ID. If both
+	// region and service do not exist when resolving an endpoint on a specific
+	// partition, the partition's domain pattern will be used to combine the
+	// endpoint and region together.
+	//
+	// This option is ignored if StrictMatching is enabled.
+	ResolveUnknownService bool
 }

 // Set combines all of the option functions together.
@@ -54,6 +73,12 @@ func StrictMatchingOption(o *Options) {
 	o.StrictMatching = true
 }

+// ResolveUnknownServiceOption sets the ResolveUnknownService option. Can be used
+// as a functional option when resolving endpoints.
+func ResolveUnknownServiceOption(o *Options) {
+	o.ResolveUnknownService = true
+}
+
 // A Resolver provides the interface for functionality to resolve endpoints.
 // The built-in Partition and DefaultResolver return values satisfy this interface.
 type Resolver interface {
@@ -99,6 +124,49 @@ type EnumPartitions interface {
 	Partitions() []Partition
 }

+// RegionsForService returns a map of regions for the partition and service.
+// If either the partition or service does not exist false will be returned
+// as the second parameter.
+//
+// This example shows how to get the regions for DynamoDB in the AWS partition.
+//    rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID)
+//
+// This is equivalent to using the partition directly.
+//    rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions()
+func RegionsForService(ps []Partition, partitionID, serviceID string) (map[string]Region, bool) {
+	for _, p := range ps {
+		if p.ID() != partitionID {
+			continue
+		}
+		if _, ok := p.p.Services[serviceID]; !ok {
+			break
+		}
+
+		s := Service{
+			id: serviceID,
+			p:  p.p,
+		}
+		return s.Regions(), true
+	}
+
+	return map[string]Region{}, false
+}
+
+// PartitionForRegion returns the first partition which includes the region
+// passed in. This includes both known regions and regions which match
+// a pattern supported by the partition which may include regions that are
+// not explicitly known by the partition. Use the Regions method of the
+// returned Partition if explicit support is needed.
+func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) {
+	for _, p := range ps {
+		if _, ok := p.p.Regions[regionID]; ok || p.p.RegionRegex.MatchString(regionID) {
+			return p, true
+		}
+	}
+
+	return Partition{}, false
+}
+
 // A Partition provides the ability to enumerate the partition's regions
 // and services.
 type Partition struct {
@@ -107,33 +175,36 @@ type Partition struct {
 }

 // ID returns the identifier of the partition.
-func (p *Partition) ID() string { return p.id }
+func (p Partition) ID() string { return p.id }

 // EndpointFor attempts to resolve the endpoint based on service and region.
 // See Options for information on configuring how the endpoint is resolved.
 //
 // If the service cannot be found in the metadata the UnknownServiceError
 // error will be returned. This validation will occur regardless if
-// StrictMatching is enabled.
+// StrictMatching is enabled. To enable resolving unknown services set the
+// "ResolveUnknownService" option to true.
+// When StrictMatching is disabled this option allows the partition resolver
+// to resolve an endpoint based on the service endpoint ID provided.
 //
 // When resolving endpoints you can choose to enable StrictMatching. This will
 // require the provided service and region to be known by the partition.
 // If the endpoint cannot be strictly resolved an error will be returned. This
 // mode is useful to ensure the endpoint resolved is valid. Without
-// StrictMatching enabled the enpoint returned my look valid but may not work.
+// StrictMatching enabled the endpoint returned may look valid but may not work.
 // StrictMatching requires the SDK to be updated if you want to take advantage
-// of new regions and services expantions.
+// of new regions and services expansions.
 //
 // Errors that can be returned.
 //    * UnknownServiceError
 //    * UnknownEndpointError
-func (p *Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
 	return p.p.EndpointFor(service, region, opts...)
 }

 // Regions returns a map of Regions indexed by their ID. This is useful for
 // enumerating over the regions in a partition.
-func (p *Partition) Regions() map[string]Region {
+func (p Partition) Regions() map[string]Region {
 	rs := map[string]Region{}
 	for id := range p.p.Regions {
 		rs[id] = Region{
@@ -147,7 +218,7 @@
 // Services returns a map of Service indexed by their ID. This is useful for
 // enumerating over the services in a partition.
-func (p *Partition) Services() map[string]Service {
+func (p Partition) Services() map[string]Service {
 	ss := map[string]Service{}
 	for id := range p.p.Services {
 		ss[id] = Service{
@@ -167,16 +238,16 @@ type Region struct {
 }

 // ID returns the region's identifier.
-func (r *Region) ID() string { return r.id }
+func (r Region) ID() string { return r.id }

 // ResolveEndpoint resolves an endpoint from the context of the region given
 // a service. See Partition.EndpointFor for usage and errors that can be returned.
-func (r *Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) {
 	return r.p.EndpointFor(service, r.id, opts...)
 }

 // Services returns a list of all services that are known to be in this region.
-func (r *Region) Services() map[string]Service {
+func (r Region) Services() map[string]Service {
 	ss := map[string]Service{}
 	for id, s := range r.p.Services {
 		if _, ok := s.Endpoints[r.id]; ok {
@@ -198,17 +269,38 @@ type Service struct {
 }

 // ID returns the identifier for the service.
-func (s *Service) ID() string { return s.id }
+func (s Service) ID() string { return s.id }

 // ResolveEndpoint resolves an endpoint from the context of a service given
 // a region. See Partition.EndpointFor for usage and errors that can be returned.
-func (s *Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
 	return s.p.EndpointFor(s.id, region, opts...)
 }

+// Regions returns a map of Regions that the service is present in.
+//
+// A region is the AWS region the service exists in. Whereas an Endpoint is
+// a URL that can be resolved to an instance of a service.
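To make the ResolveUnknownService behavior documented in this hunk concrete, a small sketch of resolving an endpoint for a service the SDK does not know; "myservice" is a hypothetical service endpoint ID:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	resolver := endpoints.DefaultResolver()

	// Without ResolveUnknownServiceOption this lookup would fail with an
	// UnknownServiceError, because "myservice" is not in the bundled model.
	ep, err := resolver.EndpointFor(
		"myservice", // hypothetical service endpoint ID
		"us-west-2",
		endpoints.ResolveUnknownServiceOption,
	)
	if err != nil {
		fmt.Println("resolve failed:", err)
		return
	}

	// The URL is synthesized from the partition's domain name pattern,
	// e.g. https://myservice.us-west-2.amazonaws.com.
	fmt.Println(ep.URL)
}
```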
+func (s Service) Regions() map[string]Region { + rs := map[string]Region{} + for id := range s.p.Services[s.id].Endpoints { + if _, ok := s.p.Regions[id]; ok { + rs[id] = Region{ + id: id, + p: s.p, + } + } + } + + return rs +} + // Endpoints returns a map of Endpoints indexed by their ID for all known // endpoints for a service. -func (s *Service) Endpoints() map[string]Endpoint { +// +// A region is the AWS region the service exists in. Whereas a Endpoint is +// an URL that can be resolved to a instance of a service. +func (s Service) Endpoints() map[string]Endpoint { es := map[string]Endpoint{} for id := range s.p.Services[s.id].Endpoints { es[id] = Endpoint{ @@ -231,15 +323,15 @@ type Endpoint struct { } // ID returns the identifier for an endpoint. -func (e *Endpoint) ID() string { return e.id } +func (e Endpoint) ID() string { return e.id } // ServiceID returns the identifier the endpoint belongs to. -func (e *Endpoint) ServiceID() string { return e.serviceID } +func (e Endpoint) ServiceID() string { return e.serviceID } // ResolveEndpoint resolves an endpoint from the context of a service and // region the endpoint represents. See Partition.EndpointFor for usage and // errors that can be returned. -func (e *Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) { +func (e Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) { return e.p.EndpointFor(e.serviceID, e.id, opts...) } @@ -272,28 +364,6 @@ type EndpointNotFoundError struct { Region string } -//// NewEndpointNotFoundError builds and returns NewEndpointNotFoundError. -//func NewEndpointNotFoundError(p, s, r string) EndpointNotFoundError { -// return EndpointNotFoundError{ -// awsError: awserr.New("EndpointNotFoundError", "unable to find endpoint", nil), -// Partition: p, -// Service: s, -// Region: r, -// } -//} -// -//// Error returns string representation of the error. -//func (e EndpointNotFoundError) Error() string { -// extra := fmt.Sprintf("partition: %q, service: %q, region: %q", -// e.Partition, e.Service, e.Region) -// return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr()) -//} -// -//// String returns the string representation of the error. -//func (e EndpointNotFoundError) String() string { -// return e.Error() -//} - // A UnknownServiceError is returned when the service does not resolve to an // endpoint. Includes a list of all known services for the partition. Returned // when a partition does not support the service. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go index 6522ce9b..13d968a2 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go @@ -79,7 +79,9 @@ func (p partition) EndpointFor(service, region string, opts ...func(*Options)) ( opt.Set(opts...) s, hasService := p.Services[service] - if !hasService { + if !(hasService || opt.ResolveUnknownService) { + // Only return error if the resolver will not fallback to creating + // endpoint based on service endpoint ID passed in. 
return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services)) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go index 1e7369db..05e92df2 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go @@ -158,7 +158,7 @@ var funcMap = template.FuncMap{ const v3Tmpl = ` {{ define "defaults" -}} -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. +// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. package endpoints @@ -209,17 +209,20 @@ import ( // DefaultResolver returns an Endpoint resolver that will be able // to resolve endpoints for: {{ ListPartitionNames . }}. // - // Casting the return value of this func to a EnumPartitions will - // allow you to get a list of the partitions in the order the endpoints - // will be resolved in. + // Use DefaultPartitions() to get the list of the default partitions. + func DefaultResolver() Resolver { + return defaultPartitions + } + + // DefaultPartitions returns a list of the partitions the SDK is bundled + // with. The available partitions are: {{ ListPartitionNames . }}. // - // resolver := endpoints.DefaultResolver() - // partitions := resolver.(endpoints.EnumPartitions).Partitions() + // partitions := endpoints.DefaultPartitions // for _, p := range partitions { // // ... inspect partitions // } - func DefaultResolver() Resolver { - return defaultPartitions + func DefaultPartitions() []Partition { + return defaultPartitions.Partitions() } var defaultPartitions = partitions{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go new file mode 100644 index 00000000..91a6f277 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go @@ -0,0 +1,12 @@ +package aws + +// JSONValue is a representation of a grab bag type that will be marshaled +// into a json string. This type can be used just like any other map. +// +// Example: +// +// values := aws.JSONValue{ +// "Foo": "Bar", +// } +// values["Baz"] = "Qux" +type JSONValue map[string]interface{} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go index db87188e..3babb5ab 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/logger.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/logger.go @@ -26,14 +26,14 @@ func (l *LogLevelType) Value() LogLevelType { // Matches returns true if the v LogLevel is enabled by this LogLevel. Should be // used with logging sub levels. Is safe to use on nil value LogLevelTypes. If -// LogLevel is nill, will default to LogOff comparison. +// LogLevel is nil, will default to LogOff comparison. func (l *LogLevelType) Matches(v LogLevelType) bool { c := l.Value() return c&v == v } // AtLeast returns true if this LogLevel is at least high enough to satisfies v. -// Is safe to use on nil value LogLevelTypes. If LogLevel is nill, will default +// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default // to LogOff comparison. 
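The nil-safe behavior of Matches and AtLeast described in the logger.go comments above can be seen in a short sketch; the log level constants are the SDK's standard ones:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// Sub levels such as LogDebugWithHTTPBody include the LogDebug bit,
	// so Matches(LogDebug) reports true for them.
	lvl := aws.LogLevel(aws.LogDebugWithHTTPBody) // returns *LogLevelType
	fmt.Println(lvl.Matches(aws.LogDebug))        // true
	fmt.Println(lvl.AtLeast(aws.LogDebug))        // true

	// Both helpers tolerate a nil receiver and compare against LogOff.
	var nilLvl *aws.LogLevelType
	fmt.Println(nilLvl.Matches(aws.LogDebug)) // false
}
```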
func (l *LogLevelType) AtLeast(v LogLevelType) bool { c := l.Value() diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go new file mode 100644 index 00000000..271da432 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go @@ -0,0 +1,19 @@ +// +build !appengine,!plan9 + +package request + +import ( + "net" + "os" + "syscall" +) + +func isErrConnectionReset(err error) bool { + if opErr, ok := err.(*net.OpError); ok { + if sysErr, ok := opErr.Err.(*os.SyscallError); ok { + return sysErr.Err == syscall.ECONNRESET + } + } + + return false +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go new file mode 100644 index 00000000..daf9eca4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go @@ -0,0 +1,11 @@ +// +build appengine plan9 + +package request + +import ( + "strings" +) + +func isErrConnectionReset(err error) bool { + return strings.Contains(err.Error(), "connection reset") +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go index 5279c19c..802ac88a 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go @@ -18,6 +18,7 @@ type Handlers struct { UnmarshalError HandlerList Retry HandlerList AfterRetry HandlerList + Complete HandlerList } // Copy returns of this handler's lists. @@ -33,6 +34,7 @@ func (h *Handlers) Copy() Handlers { UnmarshalMeta: h.UnmarshalMeta.copy(), Retry: h.Retry.copy(), AfterRetry: h.AfterRetry.copy(), + Complete: h.Complete.copy(), } } @@ -48,6 +50,7 @@ func (h *Handlers) Clear() { h.ValidateResponse.Clear() h.Retry.Clear() h.AfterRetry.Clear() + h.Complete.Clear() } // A HandlerListRunItem represents an entry in the HandlerList which @@ -85,13 +88,17 @@ func (l *HandlerList) copy() HandlerList { n := HandlerList{ AfterEachFn: l.AfterEachFn, } - n.list = append([]NamedHandler{}, l.list...) + if len(l.list) == 0 { + return n + } + + n.list = append(make([]NamedHandler, 0, len(l.list)), l.list...) return n } // Clear clears the handler list. func (l *HandlerList) Clear() { - l.list = []NamedHandler{} + l.list = l.list[0:0] } // Len returns the number of handlers in the list. @@ -101,33 +108,85 @@ func (l *HandlerList) Len() int { // PushBack pushes handler f to the back of the handler list. func (l *HandlerList) PushBack(f func(*Request)) { - l.list = append(l.list, NamedHandler{"__anonymous", f}) -} - -// PushFront pushes handler f to the front of the handler list. -func (l *HandlerList) PushFront(f func(*Request)) { - l.list = append([]NamedHandler{{"__anonymous", f}}, l.list...) + l.PushBackNamed(NamedHandler{"__anonymous", f}) } // PushBackNamed pushes named handler f to the back of the handler list. func (l *HandlerList) PushBackNamed(n NamedHandler) { + if cap(l.list) == 0 { + l.list = make([]NamedHandler, 0, 5) + } l.list = append(l.list, n) } +// PushFront pushes handler f to the front of the handler list. +func (l *HandlerList) PushFront(f func(*Request)) { + l.PushFrontNamed(NamedHandler{"__anonymous", f}) +} + // PushFrontNamed pushes named handler f to the front of the handler list. func (l *HandlerList) PushFrontNamed(n NamedHandler) { - l.list = append([]NamedHandler{n}, l.list...) 
+ if cap(l.list) == len(l.list) { + // Allocating new list required + l.list = append([]NamedHandler{n}, l.list...) + } else { + // Enough room to prepend into list. + l.list = append(l.list, NamedHandler{}) + copy(l.list[1:], l.list) + l.list[0] = n + } } // Remove removes a NamedHandler n func (l *HandlerList) Remove(n NamedHandler) { - newlist := []NamedHandler{} - for _, m := range l.list { - if m.Name != n.Name { - newlist = append(newlist, m) + l.RemoveByName(n.Name) +} + +// RemoveByName removes a NamedHandler by name. +func (l *HandlerList) RemoveByName(name string) { + for i := 0; i < len(l.list); i++ { + m := l.list[i] + if m.Name == name { + // Shift array preventing creating new arrays + copy(l.list[i:], l.list[i+1:]) + l.list[len(l.list)-1] = NamedHandler{} + l.list = l.list[:len(l.list)-1] + + // decrement list so next check to length is correct + i-- } } - l.list = newlist +} + +// SwapNamed will swap out any existing handlers with the same name as the +// passed in NamedHandler returning true if handlers were swapped. False is +// returned otherwise. +func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) { + for i := 0; i < len(l.list); i++ { + if l.list[i].Name == n.Name { + l.list[i].Fn = n.Fn + swapped = true + } + } + + return swapped +} + +// SetBackNamed will replace the named handler if it exists in the handler list. +// If the handler does not exist the handler will be added to the end of the list. +func (l *HandlerList) SetBackNamed(n NamedHandler) { + if !l.SwapNamed(n) { + l.PushBackNamed(n) + } +} + +// SetFrontNamed will replace the named handler if it exists in the handler list. +// If the handler does not exist the handler will be added to the beginning of +// the list. +func (l *HandlerList) SetFrontNamed(n NamedHandler) { + if !l.SwapNamed(n) { + l.PushFrontNamed(n) + } } // Run executes all handlers in the list with a given request object. @@ -163,6 +222,16 @@ func HandlerListStopOnError(item HandlerListRunItem) bool { return item.Request.Error == nil } +// WithAppendUserAgent will add a string to the user agent prefixed with a +// single white space. +func WithAppendUserAgent(s string) Option { + return func(r *Request) { + r.Handlers.Build.PushBack(func(r2 *Request) { + AddToUserAgent(r, s) + }) + } +} + // MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request // header. If the extra parameters are provided they will be added as metadata to the // name/version pair resulting in the following format. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go index 77312bb6..5c7db498 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -16,6 +16,28 @@ import ( "github.com/aws/aws-sdk-go/aws/client/metadata" ) +const ( + // ErrCodeSerialization is the serialization error code that is received + // during protocol unmarshaling. + ErrCodeSerialization = "SerializationError" + + // ErrCodeRead is an error that is returned during HTTP reads. + ErrCodeRead = "ReadError" + + // ErrCodeResponseTimeout is the connection timeout error that is received + // during body reads. 
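The named-handler helpers introduced in the handlers.go hunk above (PushBackNamed, SwapNamed, SetBackNamed, RemoveByName) make handler lists addressable by name. A sketch of typical use; the name "mylib.LogAttempt" is arbitrary and illustrative:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/request"
)

func main() {
	var h request.Handlers

	// Registering under a stable name lets the handler be replaced or
	// removed later without keeping a reference to the function value.
	h.Send.PushBackNamed(request.NamedHandler{
		Name: "mylib.LogAttempt", // arbitrary illustrative name
		Fn:   func(r *request.Request) { fmt.Println("sending request") },
	})

	// SetBackNamed swaps the handler in place if the name exists, and
	// appends it otherwise.
	h.Send.SetBackNamed(request.NamedHandler{
		Name: "mylib.LogAttempt",
		Fn:   func(r *request.Request) { fmt.Println("sending request, v2") },
	})

	h.Send.RemoveByName("mylib.LogAttempt")
	fmt.Println("handlers remaining:", h.Send.Len())
}
```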
+ ErrCodeResponseTimeout = "ResponseTimeout" + + // ErrCodeInvalidPresignExpire is returned when the expire time provided to + // presign is invalid + ErrCodeInvalidPresignExpire = "InvalidPresignExpireError" + + // CanceledErrorCode is the error code that will be returned by an + // API request that was canceled. Requests given a aws.Context may + // return this error when canceled. + CanceledErrorCode = "RequestCanceled" +) + // A Request is the service request to be made. type Request struct { Config aws.Config @@ -23,30 +45,37 @@ type Request struct { Handlers Handlers Retryer - Time time.Time - ExpireTime time.Duration - Operation *Operation - HTTPRequest *http.Request - HTTPResponse *http.Response - Body io.ReadSeeker - BodyStart int64 // offset from beginning of Body that the request body starts - Params interface{} - Error error - Data interface{} - RequestID string - RetryCount int - Retryable *bool - RetryDelay time.Duration - NotHoist bool - SignedHeaderVals http.Header - LastSignedAt time.Time + Time time.Time + Operation *Operation + HTTPRequest *http.Request + HTTPResponse *http.Response + Body io.ReadSeeker + BodyStart int64 // offset from beginning of Body that the request body starts + Params interface{} + Error error + Data interface{} + RequestID string + RetryCount int + Retryable *bool + RetryDelay time.Duration + NotHoist bool + SignedHeaderVals http.Header + LastSignedAt time.Time + DisableFollowRedirects bool + + // A value greater than 0 instructs the request to be signed as Presigned URL + // You should not set this field directly. Instead use Request's + // Presign or PresignRequest methods. + ExpireTime time.Duration + + context aws.Context built bool - // Need to persist an intermideant body betweend the input Body and HTTP + // Need to persist an intermediate body between the input Body and HTTP // request body because the HTTP Client's transport can maintain a reference // to the HTTP request's body after the client has returned. This value is - // safe to use concurrently and rewraps the input Body for each HTTP request. + // safe to use concurrently and wrap the input Body for each HTTP request. safeBody *offsetReader } @@ -60,14 +89,6 @@ type Operation struct { BeforePresignFn func(r *Request) error } -// Paginator keeps track of pagination configuration for an API operation. -type Paginator struct { - InputTokens []string - OutputTokens []string - LimitToken string - TruncationToken string -} - // New returns a new Request pointer for the service API // operation and parameters. // @@ -91,6 +112,8 @@ func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err) } + SanitizeHostForHeader(httpReq) + r := &Request{ Config: cfg, ClientInfo: clientInfo, @@ -111,6 +134,94 @@ func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, return r } +// A Option is a functional option that can augment or modify a request when +// using a WithContext API operation method. +type Option func(*Request) + +// WithGetResponseHeader builds a request Option which will retrieve a single +// header value from the HTTP Response. If there are multiple values for the +// header key use WithGetResponseHeaders instead to access the http.Header +// map directly. The passed in val pointer must be non-nil. +// +// This Option can be used multiple times with a single API operation. 
+//
+//    var id2, versionID string
+//    svc.PutObjectWithContext(ctx, params,
+//        request.WithGetResponseHeader("x-amz-id-2", &id2),
+//        request.WithGetResponseHeader("x-amz-version-id", &versionID),
+//    )
+func WithGetResponseHeader(key string, val *string) Option {
+	return func(r *Request) {
+		r.Handlers.Complete.PushBack(func(req *Request) {
+			*val = req.HTTPResponse.Header.Get(key)
+		})
+	}
+}
+
+// WithGetResponseHeaders builds a request Option which will retrieve the
+// headers from the HTTP response and assign them to the passed in headers
+// variable. The passed in headers pointer must be non-nil.
+//
+//    var headers http.Header
+//    svc.PutObjectWithContext(ctx, params, request.WithGetResponseHeaders(&headers))
+func WithGetResponseHeaders(headers *http.Header) Option {
+	return func(r *Request) {
+		r.Handlers.Complete.PushBack(func(req *Request) {
+			*headers = req.HTTPResponse.Header
+		})
+	}
+}
+
+// WithLogLevel is a request option that will set the request to use a specific
+// log level when the request is made.
+//
+//    svc.PutObjectWithContext(ctx, params, request.WithLogLevel(aws.LogDebugWithHTTPBody))
+func WithLogLevel(l aws.LogLevelType) Option {
+	return func(r *Request) {
+		r.Config.LogLevel = aws.LogLevel(l)
+	}
+}
+
+// ApplyOptions will apply each option to the request calling them in the order
+// they were provided.
+func (r *Request) ApplyOptions(opts ...Option) {
+	for _, opt := range opts {
+		opt(r)
+	}
+}
+
+// Context will always return a non-nil context. If the Request does not have a
+// context aws.BackgroundContext will be returned.
+func (r *Request) Context() aws.Context {
+	if r.context != nil {
+		return r.context
+	}
+	return aws.BackgroundContext()
+}
+
+// SetContext adds a Context to the current request that can be used to cancel
+// an in-flight request. The Context value must not be nil, or this method will
+// panic.
+//
+// Unlike http.Request.WithContext, SetContext does not return a copy of the
+// Request. It is not safe to use a single Request value for multiple
+// requests. A new Request should be created for each API operation request.
+//
+// Go 1.6 and below:
+// The http.Request's Cancel field will be set to the Done() value of
+// the context. This will overwrite the Cancel field's value.
+//
+// Go 1.7 and above:
+// The http.Request.WithContext will be used to set the context on the underlying
+// http.Request. This will create a shallow copy of the http.Request. The SDK
+// may create sub contexts in the future for nested requests such as retries.
+func (r *Request) SetContext(ctx aws.Context) {
+	if ctx == nil {
+		panic("context cannot be nil")
+	}
+	setRequestContext(r, ctx)
+}
+
 // WillRetry returns if the request can be retried.
 func (r *Request) WillRetry() bool {
 	return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries()
@@ -149,34 +260,59 @@ func (r *Request) SetReaderBody(reader io.ReadSeeker) {

 // Presign returns the request's signed URL. Error will be returned
 // if the signing fails.
-func (r *Request) Presign(expireTime time.Duration) (string, error) {
-	r.ExpireTime = expireTime
+//
+// It is invalid to create a presigned URL with an expire duration of 0 or
+// less. An error is returned if the expire duration is 0 or less.
+func (r *Request) Presign(expire time.Duration) (string, error) {
+	r = r.copy()
+
+	// Presign requires all headers be hoisted. There is no way to retrieve
+	// the signed headers not hoisted without this, making the presigned URL
+	// useless.
r.NotHoist = false + u, _, err := getPresignedURL(r, expire) + return u, err +} + +// PresignRequest behaves just like presign, with the addition of returning a +// set of headers that were signed. +// +// It is invalid to create a presigned URL with a expire duration 0 or less. An +// error is returned if expire duration is 0 or less. +// +// Returns the URL string for the API operation with signature in the query string, +// and the HTTP headers that were included in the signature. These headers must +// be included in any HTTP request made with the presigned URL. +// +// To prevent hoisting any headers to the query string set NotHoist to true on +// this Request value prior to calling PresignRequest. +func (r *Request) PresignRequest(expire time.Duration) (string, http.Header, error) { + r = r.copy() + return getPresignedURL(r, expire) +} + +func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, error) { + if expire <= 0 { + return "", nil, awserr.New( + ErrCodeInvalidPresignExpire, + "presigned URL requires an expire duration greater than 0", + nil, + ) + } + + r.ExpireTime = expire + if r.Operation.BeforePresignFn != nil { - r = r.copy() - err := r.Operation.BeforePresignFn(r) - if err != nil { - return "", err + if err := r.Operation.BeforePresignFn(r); err != nil { + return "", nil, err } } - r.Sign() - if r.Error != nil { - return "", r.Error + if err := r.Sign(); err != nil { + return "", nil, err } - return r.HTTPRequest.URL.String(), nil -} -// PresignRequest behaves just like presign, but hoists all headers and signs them. -// Also returns the signed hash back to the user -func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) { - r.ExpireTime = expireTime - r.NotHoist = true - r.Sign() - if r.Error != nil { - return "", nil, r.Error - } return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil } @@ -237,10 +373,7 @@ func (r *Request) Sign() error { return r.Error } -// ResetBody rewinds the request body backto its starting position, and -// set's the HTTP Request body reference. When the body is read prior -// to being sent in the HTTP request it will need to be rewound. -func (r *Request) ResetBody() { +func (r *Request) getNextRequestBody() (io.ReadCloser, error) { if r.safeBody != nil { r.safeBody.Close() } @@ -262,14 +395,14 @@ func (r *Request) ResetBody() { // Related golang/go#18257 l, err := computeBodyLength(r.Body) if err != nil { - r.Error = awserr.New("SerializationError", "failed to compute request body size", err) - return + return nil, awserr.New(ErrCodeSerialization, "failed to compute request body size", err) } + var body io.ReadCloser if l == 0 { - r.HTTPRequest.Body = noBodyReader + body = NoBody } else if l > 0 { - r.HTTPRequest.Body = r.safeBody + body = r.safeBody } else { // Hack to prevent sending bodies for methods where the body // should be ignored by the server. Sending bodies on these @@ -281,11 +414,13 @@ func (r *Request) ResetBody() { // a io.Reader that was not also an io.Seeker. switch r.Operation.HTTPMethod { case "GET", "HEAD", "DELETE": - r.HTTPRequest.Body = noBodyReader + body = NoBody default: - r.HTTPRequest.Body = r.safeBody + body = r.safeBody } } + + return body, nil } // Attempts to compute the length of the body of the reader using the @@ -344,6 +479,12 @@ func (r *Request) GetBody() io.ReadSeeker { // // Send will not close the request.Request's body. 
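Presign and PresignRequest above now share getPresignedURL and reject non-positive expire durations. A sketch of both calls, using S3's GetObjectRequest as an illustrative caller; bucket, key, and region are placeholders:

```go
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := s3.New(sess)

	req, _ := svc.GetObjectRequest(&s3.GetObjectInput{
		Bucket: aws.String("mybucket"), // placeholder
		Key:    aws.String("mykey"),    // placeholder
	})

	// An expire duration of 0 or less now fails with
	// ErrCodeInvalidPresignExpire instead of producing a broken URL.
	url, err := req.Presign(15 * time.Minute)
	if err != nil {
		fmt.Println("presign failed:", err)
		return
	}
	fmt.Println(url)

	// PresignRequest additionally returns the headers that were signed;
	// they must accompany any HTTP request made with the URL.
	url2, signedHeaders, err := req.PresignRequest(15 * time.Minute)
	if err == nil {
		fmt.Println(url2, signedHeaders)
	}
}
```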
func (r *Request) Send() error { + defer func() { + // Regardless of success or failure of the request trigger the Complete + // request handlers. + r.Handlers.Complete.Run(r) + }() + for { if aws.BoolValue(r.Retryable) { if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { @@ -381,7 +522,7 @@ func (r *Request) Send() error { r.Handlers.Retry.Run(r) r.Handlers.AfterRetry.Run(r) if r.Error != nil { - debugLogReqError(r, "Send Request", false, r.Error) + debugLogReqError(r, "Send Request", false, err) return r.Error } debugLogReqError(r, "Send Request", true, err) @@ -390,12 +531,13 @@ func (r *Request) Send() error { r.Handlers.UnmarshalMeta.Run(r) r.Handlers.ValidateResponse.Run(r) if r.Error != nil { - err := r.Error r.Handlers.UnmarshalError.Run(r) + err := r.Error + r.Handlers.Retry.Run(r) r.Handlers.AfterRetry.Run(r) if r.Error != nil { - debugLogReqError(r, "Validate Response", false, r.Error) + debugLogReqError(r, "Validate Response", false, err) return r.Error } debugLogReqError(r, "Validate Response", true, err) @@ -408,7 +550,7 @@ func (r *Request) Send() error { r.Handlers.Retry.Run(r) r.Handlers.AfterRetry.Run(r) if r.Error != nil { - debugLogReqError(r, "Unmarshal Response", false, r.Error) + debugLogReqError(r, "Unmarshal Response", false, err) return r.Error } debugLogReqError(r, "Unmarshal Response", true, err) @@ -446,6 +588,9 @@ func shouldRetryCancel(r *Request) bool { timeoutErr := false errStr := r.Error.Error() if ok { + if awsErr.Code() == CanceledErrorCode { + return false + } err := awsErr.OrigErr() netErr, netOK := err.(net.Error) timeoutErr = netOK && netErr.Temporary() @@ -463,3 +608,72 @@ func shouldRetryCancel(r *Request) bool { errStr != "net/http: request canceled while waiting for connection") } + +// SanitizeHostForHeader removes default port from host and updates request.Host +func SanitizeHostForHeader(r *http.Request) { + host := getHost(r) + port := portOnly(host) + if port != "" && isDefaultPort(r.URL.Scheme, port) { + r.Host = stripPort(host) + } +} + +// Returns host from request +func getHost(r *http.Request) string { + if r.Host != "" { + return r.Host + } + + return r.URL.Host +} + +// Hostname returns u.Host, without any port number. +// +// If Host is an IPv6 literal with a port number, Hostname returns the +// IPv6 literal without the square brackets. IPv6 literals may include +// a zone identifier. +// +// Copied from the Go 1.8 standard library (net/url) +func stripPort(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return hostport + } + if i := strings.IndexByte(hostport, ']'); i != -1 { + return strings.TrimPrefix(hostport[:i], "[") + } + return hostport[:colon] +} + +// Port returns the port part of u.Host, without the leading colon. +// If u.Host doesn't contain a port, Port returns an empty string. +// +// Copied from the Go 1.8 standard library (net/url) +func portOnly(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return "" + } + if i := strings.Index(hostport, "]:"); i != -1 { + return hostport[i+len("]:"):] + } + if strings.Contains(hostport, "]") { + return "" + } + return hostport[colon+len(":"):] +} + +// Returns true if the specified URI is using the standard port +// (i.e. 
port 80 for HTTP URIs or 443 for HTTPS URIs) +func isDefaultPort(scheme, port string) bool { + if port == "" { + return true + } + + lowerCaseScheme := strings.ToLower(scheme) + if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") { + return true + } + + return false +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go index 1323af90..869b97a1 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go @@ -16,6 +16,24 @@ func (noBody) Read([]byte) (int, error) { return 0, io.EOF } func (noBody) Close() error { return nil } func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil } -// Is an empty reader that will trigger the Go HTTP client to not include +// NoBody is an empty reader that will trigger the Go HTTP client to not include // and body in the HTTP request. -var noBodyReader = noBody{} +var NoBody = noBody{} + +// ResetBody rewinds the request body back to its starting position, and +// set's the HTTP Request body reference. When the body is read prior +// to being sent in the HTTP request it will need to be rewound. +// +// ResetBody will automatically be called by the SDK's build handler, but if +// the request is being used directly ResetBody must be called before the request +// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically +// call ResetBody. +func (r *Request) ResetBody() { + body, err := r.getNextRequestBody() + if err != nil { + r.Error = err + return + } + + r.HTTPRequest.Body = body +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go index 8b963f4d..c32fc69b 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go @@ -2,8 +2,32 @@ package request -import "net/http" +import ( + "net/http" +) -// Is a http.NoBody reader instructing Go HTTP client to not include +// NoBody is a http.NoBody reader instructing Go HTTP client to not include // and body in the HTTP request. -var noBodyReader = http.NoBody +var NoBody = http.NoBody + +// ResetBody rewinds the request body back to its starting position, and +// set's the HTTP Request body reference. When the body is read prior +// to being sent in the HTTP request it will need to be rewound. +// +// ResetBody will automatically be called by the SDK's build handler, but if +// the request is being used directly ResetBody must be called before the request +// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically +// call ResetBody. +// +// Will also set the Go 1.8's http.Request.GetBody member to allow retrying +// PUT/POST redirects. +func (r *Request) ResetBody() { + body, err := r.getNextRequestBody() + if err != nil { + r.Error = err + return + } + + r.HTTPRequest.Body = body + r.HTTPRequest.GetBody = r.getNextRequestBody +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go new file mode 100644 index 00000000..a7365cd1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go @@ -0,0 +1,14 @@ +// +build go1.7 + +package request + +import "github.com/aws/aws-sdk-go/aws" + +// setContext updates the Request to use the passed in context for cancellation. 
+// Context will also be used for request retry delay. +// +// Creates shallow copy of the http.Request with the WithContext method. +func setRequestContext(r *Request, ctx aws.Context) { + r.context = ctx + r.HTTPRequest = r.HTTPRequest.WithContext(ctx) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go new file mode 100644 index 00000000..307fa070 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go @@ -0,0 +1,14 @@ +// +build !go1.7 + +package request + +import "github.com/aws/aws-sdk-go/aws" + +// setContext updates the Request to use the passed in context for cancellation. +// Context will also be used for request retry delay. +// +// Creates shallow copy of the http.Request with the WithContext method. +func setRequestContext(r *Request, ctx aws.Context) { + r.context = ctx + r.HTTPRequest.Cancel = ctx.Done() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go index 2939ec47..59de6736 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go @@ -2,29 +2,125 @@ package request import ( "reflect" + "sync/atomic" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awsutil" ) -//type Paginater interface { -// HasNextPage() bool -// NextPage() *Request -// EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error -//} +// A Pagination provides paginating of SDK API operations which are paginatable. +// Generally you should not use this type directly, but use the "Pages" API +// operations method to automatically perform pagination for you. Such as, +// "S3.ListObjectsPages", and "S3.ListObjectsPagesWithContext" methods. +// +// Pagination differs from a Paginator type in that pagination is the type that +// does the pagination between API operations, and Paginator defines the +// configuration that will be used per page request. +// +// cont := true +// for p.Next() && cont { +// data := p.Page().(*s3.ListObjectsOutput) +// // process the page's data +// } +// return p.Err() +// +// See service client API operation Pages methods for examples how the SDK will +// use the Pagination type. +type Pagination struct { + // Function to return a Request value for each pagination request. + // Any configuration or handlers that need to be applied to the request + // prior to getting the next page should be done here before the request + // returned. + // + // NewRequest should always be built from the same API operations. It is + // undefined if different API operations are returned on subsequent calls. + NewRequest func() (*Request, error) -// HasNextPage returns true if this request has more pages of data available. -func (r *Request) HasNextPage() bool { - return len(r.nextPageTokens()) > 0 + started bool + nextTokens []interface{} + + err error + curPage interface{} } -// nextPageTokens returns the tokens to use when asking for the next page of -// data. +// HasNextPage will return true if Pagination is able to determine that the API +// operation has additional pages. False will be returned if there are no more +// pages remaining. +// +// Will always return true if Next has not been called yet. 
+func (p *Pagination) HasNextPage() bool { + return !(p.started && len(p.nextTokens) == 0) +} + +// Err returns the error Pagination encountered when retrieving the next page. +func (p *Pagination) Err() error { + return p.err +} + +// Page returns the current page. Page should only be called after a successful +// call to Next. It is undefined what Page will return if Page is called after +// Next returns false. +func (p *Pagination) Page() interface{} { + return p.curPage +} + +// Next will attempt to retrieve the next page for the API operation. When a page +// is retrieved true will be returned. If the page cannot be retrieved, or there +// are no more pages false will be returned. +// +// Use the Page method to retrieve the current page data. The data will need +// to be cast to the API operation's output type. +// +// Use the Err method to determine if an error occurred if Page returns false. +func (p *Pagination) Next() bool { + if !p.HasNextPage() { + return false + } + + req, err := p.NewRequest() + if err != nil { + p.err = err + return false + } + + if p.started { + for i, intok := range req.Operation.InputTokens { + awsutil.SetValueAtPath(req.Params, intok, p.nextTokens[i]) + } + } + p.started = true + + err = req.Send() + if err != nil { + p.err = err + return false + } + + p.nextTokens = req.nextPageTokens() + p.curPage = req.Data + + return true +} + +// A Paginator is the configuration data that defines how an API operation +// should be paginated. This type is used by the API service models to define +// the generated pagination config for service APIs. +// +// The Pagination type is what provides iterating between pages of an API. It +// is only used to store the token metadata the SDK should use for performing +// pagination. +type Paginator struct { + InputTokens []string + OutputTokens []string + LimitToken string + TruncationToken string +} + +// nextPageTokens returns the tokens to use when asking for the next page of data. func (r *Request) nextPageTokens() []interface{} { if r.Operation.Paginator == nil { return nil } - if r.Operation.TruncationToken != "" { tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken) if len(tr) == 0 { @@ -61,9 +157,40 @@ func (r *Request) nextPageTokens() []interface{} { return tokens } +// Ensure a deprecated item is only logged once instead of each time its used. +func logDeprecatedf(logger aws.Logger, flag *int32, msg string) { + if logger == nil { + return + } + if atomic.CompareAndSwapInt32(flag, 0, 1) { + logger.Log(msg) + } +} + +var ( + logDeprecatedHasNextPage int32 + logDeprecatedNextPage int32 + logDeprecatedEachPage int32 +) + +// HasNextPage returns true if this request has more pages of data available. +// +// Deprecated Use Pagination type for configurable pagination of API operations +func (r *Request) HasNextPage() bool { + logDeprecatedf(r.Config.Logger, &logDeprecatedHasNextPage, + "Request.HasNextPage deprecated. Use Pagination type for configurable pagination of API operations") + + return len(r.nextPageTokens()) > 0 +} + // NextPage returns a new Request that can be executed to return the next // page of result data. Call .Send() on this request to execute it. +// +// Deprecated Use Pagination type for configurable pagination of API operations func (r *Request) NextPage() *Request { + logDeprecatedf(r.Config.Logger, &logDeprecatedNextPage, + "Request.NextPage deprecated. 
Use Pagination type for configurable pagination of API operations") + tokens := r.nextPageTokens() if len(tokens) == 0 { return nil @@ -90,7 +217,12 @@ func (r *Request) NextPage() *Request { // as the structure "T". The lastPage value represents whether the page is // the last page of data or not. The return value of this function should // return true to keep iterating or false to stop. +// +// Deprecated Use Pagination type for configurable pagination of API operations func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error { + logDeprecatedf(r.Config.Logger, &logDeprecatedEachPage, + "Request.EachPage deprecated. Use Pagination type for configurable pagination of API operations") + for page := r; page != nil; page = page.NextPage() { if err := page.Send(); err != nil { return err diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go index ebd60ccc..f35fef21 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -8,7 +8,7 @@ import ( ) // Retryer is an interface to control retry logic for a given service. -// The default implementation used by most services is the service.DefaultRetryer +// The default implementation used by most services is the client.DefaultRetryer // structure, which contains basic retry logic using exponential backoff. type Retryer interface { RetryRules(*Request) time.Duration @@ -26,8 +26,10 @@ func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { // retryableCodes is a collection of service response codes which are retry-able // without any further action. var retryableCodes = map[string]struct{}{ - "RequestError": {}, - "RequestTimeout": {}, + "RequestError": {}, + "RequestTimeout": {}, + ErrCodeResponseTimeout: {}, + "RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout } var throttleCodes = map[string]struct{}{ @@ -36,7 +38,6 @@ var throttleCodes = map[string]struct{}{ "ThrottlingException": {}, "RequestLimitExceeded": {}, "RequestThrottled": {}, - "LimitExceededException": {}, // Deleting 10+ DynamoDb tables at once "TooManyRequestsException": {}, // Lambda functions "PriorRequestNotComplete": {}, // Route53 } @@ -68,35 +69,93 @@ func isCodeExpiredCreds(code string) bool { return ok } +var validParentCodes = map[string]struct{}{ + ErrCodeSerialization: {}, + ErrCodeRead: {}, +} + +type temporaryError interface { + Temporary() bool +} + +func isNestedErrorRetryable(parentErr awserr.Error) bool { + if parentErr == nil { + return false + } + + if _, ok := validParentCodes[parentErr.Code()]; !ok { + return false + } + + err := parentErr.OrigErr() + if err == nil { + return false + } + + if aerr, ok := err.(awserr.Error); ok { + return isCodeRetryable(aerr.Code()) + } + + if t, ok := err.(temporaryError); ok { + return t.Temporary() + } + + return isErrConnectionReset(err) +} + // IsErrorRetryable returns whether the error is retryable, based on its Code. -// Returns false if the request has no Error set. -func (r *Request) IsErrorRetryable() bool { - if r.Error != nil { - if err, ok := r.Error.(awserr.Error); ok { - return isCodeRetryable(err.Code()) +// Returns false if error is nil. 
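
As a sketch of how the package-level error helpers defined here might back a
custom Retryer, assuming client.DefaultRetryer from the aws/client package:

	// throttleRetryer treats every throttle response as retryable and
	// defers to the default retry logic otherwise.
	type throttleRetryer struct {
		client.DefaultRetryer
	}

	func (d throttleRetryer) ShouldRetry(r *request.Request) bool {
		if request.IsErrorThrottle(r.Error) {
			return true
		}
		return d.DefaultRetryer.ShouldRetry(r)
	}

	// Wiring it into a config with the WithRetryer helper above:
	cfg := request.WithRetryer(aws.NewConfig(), throttleRetryer{
		DefaultRetryer: client.DefaultRetryer{NumMaxRetries: 5},
	})
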
+func IsErrorRetryable(err error) bool { + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + return isCodeRetryable(aerr.Code()) || isNestedErrorRetryable(aerr) } } return false } // IsErrorThrottle returns whether the error is to be throttled based on its code. -// Returns false if the request has no Error set -func (r *Request) IsErrorThrottle() bool { - if r.Error != nil { - if err, ok := r.Error.(awserr.Error); ok { - return isCodeThrottle(err.Code()) +// Returns false if error is nil. +func IsErrorThrottle(err error) bool { + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + return isCodeThrottle(aerr.Code()) } } return false } -// IsErrorExpired returns whether the error code is a credential expiry error. -// Returns false if the request has no Error set. -func (r *Request) IsErrorExpired() bool { - if r.Error != nil { - if err, ok := r.Error.(awserr.Error); ok { - return isCodeExpiredCreds(err.Code()) +// IsErrorExpiredCreds returns whether the error code is a credential expiry error. +// Returns false if error is nil. +func IsErrorExpiredCreds(err error) bool { + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + return isCodeExpiredCreds(aerr.Code()) } } return false } + +// IsErrorRetryable returns whether the error is retryable, based on its Code. +// Returns false if the request has no Error set. +// +// Alias for the utility function IsErrorRetryable +func (r *Request) IsErrorRetryable() bool { + return IsErrorRetryable(r.Error) +} + +// IsErrorThrottle returns whether the error is to be throttled based on its code. +// Returns false if the request has no Error set +// +// Alias for the utility function IsErrorThrottle +func (r *Request) IsErrorThrottle() bool { + return IsErrorThrottle(r.Error) +} + +// IsErrorExpired returns whether the error code is a credential expiry error. +// Returns false if the request has no Error set. +// +// Alias for the utility function IsErrorExpiredCreds +func (r *Request) IsErrorExpired() bool { + return IsErrorExpiredCreds(r.Error) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go new file mode 100644 index 00000000..09a44eb9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go @@ -0,0 +1,94 @@ +package request + +import ( + "io" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +var timeoutErr = awserr.New( + ErrCodeResponseTimeout, + "read on body has reached the timeout limit", + nil, +) + +type readResult struct { + n int + err error +} + +// timeoutReadCloser will handle body reads that take too long. +// We will return a ErrReadTimeout error if a timeout occurs. +type timeoutReadCloser struct { + reader io.ReadCloser + duration time.Duration +} + +// Read will spin off a goroutine to call the reader's Read method. We will +// select on the timer's channel or the read's channel. Whoever completes first +// will be returned. +func (r *timeoutReadCloser) Read(b []byte) (int, error) { + timer := time.NewTimer(r.duration) + c := make(chan readResult, 1) + + go func() { + n, err := r.reader.Read(b) + timer.Stop() + c <- readResult{n: n, err: err} + }() + + select { + case data := <-c: + return data.n, data.err + case <-timer.C: + return 0, timeoutErr + } +} + +func (r *timeoutReadCloser) Close() error { + return r.reader.Close() +} + +const ( + // HandlerResponseTimeout is what we use to signify the name of the + // response timeout handler. 
+	HandlerResponseTimeout = "ResponseTimeoutHandler"
+)
+
+// adaptToResponseTimeoutError is a handler that will replace any top level error
+// with an ErrCodeResponseTimeout error, if its child error is one.
+func adaptToResponseTimeoutError(req *Request) {
+	if err, ok := req.Error.(awserr.Error); ok {
+		aerr, ok := err.OrigErr().(awserr.Error)
+		if ok && aerr.Code() == ErrCodeResponseTimeout {
+			req.Error = aerr
+		}
+	}
+}
+
+// WithResponseReadTimeout is a request option that will wrap the body in a timeout read closer.
+// This will allow for per-read timeouts. If a timeout occurs, an error with the code
+// ErrCodeResponseTimeout will be returned.
+//
+//     svc.PutObjectWithContext(ctx, params, request.WithResponseReadTimeout(30 * time.Second))
+func WithResponseReadTimeout(duration time.Duration) Option {
+	return func(r *Request) {
+
+		var timeoutHandler = NamedHandler{
+			HandlerResponseTimeout,
+			func(req *Request) {
+				req.HTTPResponse.Body = &timeoutReadCloser{
+					reader:   req.HTTPResponse.Body,
+					duration: duration,
+				}
+			}}
+
+		// remove the handler so we are not stomping over any new durations.
+		r.Handlers.Send.RemoveByName(HandlerResponseTimeout)
+		r.Handlers.Send.PushBackNamed(timeoutHandler)
+
+		r.Handlers.Unmarshal.PushBack(adaptToResponseTimeoutError)
+		r.Handlers.UnmarshalError.PushBack(adaptToResponseTimeoutError)
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
index 2520286b..40124622 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
@@ -220,7 +220,7 @@ type ErrParamMinLen struct {
 func NewErrParamMinLen(field string, min int) *ErrParamMinLen {
 	return &ErrParamMinLen{
 		errInvalidParam: errInvalidParam{
-			code:  ParamMinValueErrCode,
+			code:  ParamMinLenErrCode,
 			field: field,
 			msg:   fmt.Sprintf("minimum field size of %v", min),
 		},
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
new file mode 100644
index 00000000..4601f883
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
@@ -0,0 +1,295 @@
+package request
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/awsutil"
+)
+
+// WaiterResourceNotReadyErrorCode is the error code returned by a waiter when
+// the waiter's max attempts have been exhausted.
+const WaiterResourceNotReadyErrorCode = "ResourceNotReady"
+
+// A WaiterOption is a function that will update the Waiter value's fields to
+// configure the waiter.
+type WaiterOption func(*Waiter)
+
+// WithWaiterMaxAttempts returns a WaiterOption that sets the maximum number of
+// times the waiter should attempt to check the resource for the target state.
+func WithWaiterMaxAttempts(max int) WaiterOption {
+	return func(w *Waiter) {
+		w.MaxAttempts = max
+	}
+}
+
+// WaiterDelay will return a delay the waiter should pause between attempts to
+// check the resource state. The passed in attempt is the number of times the
+// Waiter has checked the resource state.
+type WaiterDelay func(attempt int) time.Duration
+
+// ConstantWaiterDelay returns a WaiterDelay that will always return a constant
+// delay the waiter should use between attempts. It ignores the number of
+// attempts made.
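
A usage sketch for the per-read timeout option defined above, assuming an S3
client (svc) and a context value (ctx):

	out, err := svc.GetObjectWithContext(ctx, &s3.GetObjectInput{
		Bucket: aws.String("my-bucket"), // hypothetical bucket
		Key:    aws.String("my-key"),    // hypothetical key
	}, request.WithResponseReadTimeout(30*time.Second))
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == request.ErrCodeResponseTimeout {
			// a single Read of the response body stalled past 30 seconds
		}
		return err
	}
	defer out.Body.Close()
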
+func ConstantWaiterDelay(delay time.Duration) WaiterDelay { + return func(attempt int) time.Duration { + return delay + } +} + +// WithWaiterDelay will set the Waiter to use the WaiterDelay passed in. +func WithWaiterDelay(delayer WaiterDelay) WaiterOption { + return func(w *Waiter) { + w.Delay = delayer + } +} + +// WithWaiterLogger returns a waiter option to set the logger a waiter +// should use to log warnings and errors to. +func WithWaiterLogger(logger aws.Logger) WaiterOption { + return func(w *Waiter) { + w.Logger = logger + } +} + +// WithWaiterRequestOptions returns a waiter option setting the request +// options for each request the waiter makes. Appends to waiter's request +// options already set. +func WithWaiterRequestOptions(opts ...Option) WaiterOption { + return func(w *Waiter) { + w.RequestOptions = append(w.RequestOptions, opts...) + } +} + +// A Waiter provides the functionality to perform a blocking call which will +// wait for a resource state to be satisfied by a service. +// +// This type should not be used directly. The API operations provided in the +// service packages prefixed with "WaitUntil" should be used instead. +type Waiter struct { + Name string + Acceptors []WaiterAcceptor + Logger aws.Logger + + MaxAttempts int + Delay WaiterDelay + + RequestOptions []Option + NewRequest func([]Option) (*Request, error) + SleepWithContext func(aws.Context, time.Duration) error +} + +// ApplyOptions updates the waiter with the list of waiter options provided. +func (w *Waiter) ApplyOptions(opts ...WaiterOption) { + for _, fn := range opts { + fn(w) + } +} + +// WaiterState are states the waiter uses based on WaiterAcceptor definitions +// to identify if the resource state the waiter is waiting on has occurred. +type WaiterState int + +// String returns the string representation of the waiter state. +func (s WaiterState) String() string { + switch s { + case SuccessWaiterState: + return "success" + case FailureWaiterState: + return "failure" + case RetryWaiterState: + return "retry" + default: + return "unknown waiter state" + } +} + +// States the waiter acceptors will use to identify target resource states. +const ( + SuccessWaiterState WaiterState = iota // waiter successful + FailureWaiterState // waiter failed + RetryWaiterState // waiter needs to be retried +) + +// WaiterMatchMode is the mode that the waiter will use to match the WaiterAcceptor +// definition's Expected attribute. +type WaiterMatchMode int + +// Modes the waiter will use when inspecting API response to identify target +// resource states. +const ( + PathAllWaiterMatch WaiterMatchMode = iota // match on all paths + PathWaiterMatch // match on specific path + PathAnyWaiterMatch // match on any path + PathListWaiterMatch // match on list of paths + StatusWaiterMatch // match on status code + ErrorWaiterMatch // match on error +) + +// String returns the string representation of the waiter match mode. +func (m WaiterMatchMode) String() string { + switch m { + case PathAllWaiterMatch: + return "pathAll" + case PathWaiterMatch: + return "path" + case PathAnyWaiterMatch: + return "pathAny" + case PathListWaiterMatch: + return "pathList" + case StatusWaiterMatch: + return "status" + case ErrorWaiterMatch: + return "error" + default: + return "unknown waiter match mode" + } +} + +// WaitWithContext will make requests for the API operation using NewRequest to +// build API requests. 
The request's response will be compared against the +// Waiter's Acceptors to determine the successful state of the resource the +// waiter is inspecting. +// +// The passed in context must not be nil. If it is nil a panic will occur. The +// Context will be used to cancel the waiter's pending requests and retry delays. +// Use aws.BackgroundContext if no context is available. +// +// The waiter will continue until the target state defined by the Acceptors, +// or the max attempts expires. +// +// Will return the WaiterResourceNotReadyErrorCode error code if the waiter's +// retryer ShouldRetry returns false. This normally will happen when the max +// wait attempts expires. +func (w Waiter) WaitWithContext(ctx aws.Context) error { + + for attempt := 1; ; attempt++ { + req, err := w.NewRequest(w.RequestOptions) + if err != nil { + waiterLogf(w.Logger, "unable to create request %v", err) + return err + } + req.Handlers.Build.PushBack(MakeAddToUserAgentFreeFormHandler("Waiter")) + err = req.Send() + + // See if any of the acceptors match the request's response, or error + for _, a := range w.Acceptors { + if matched, matchErr := a.match(w.Name, w.Logger, req, err); matched { + return matchErr + } + } + + // The Waiter should only check the resource state MaxAttempts times + // This is here instead of in the for loop above to prevent delaying + // unnecessary when the waiter will not retry. + if attempt == w.MaxAttempts { + break + } + + // Delay to wait before inspecting the resource again + delay := w.Delay(attempt) + if sleepFn := req.Config.SleepDelay; sleepFn != nil { + // Support SleepDelay for backwards compatibility and testing + sleepFn(delay) + } else { + sleepCtxFn := w.SleepWithContext + if sleepCtxFn == nil { + sleepCtxFn = aws.SleepWithContext + } + + if err := sleepCtxFn(ctx, delay); err != nil { + return awserr.New(CanceledErrorCode, "waiter context canceled", err) + } + } + } + + return awserr.New(WaiterResourceNotReadyErrorCode, "exceeded wait attempts", nil) +} + +// A WaiterAcceptor provides the information needed to wait for an API operation +// to complete. +type WaiterAcceptor struct { + State WaiterState + Matcher WaiterMatchMode + Argument string + Expected interface{} +} + +// match returns if the acceptor found a match with the passed in request +// or error. True is returned if the acceptor made a match, error is returned +// if there was an error attempting to perform the match. 
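
Callers normally reach this machinery through the generated "WaitUntil"
methods; a sketch with S3, assuming the WithContext waiter variants generated
against this revision of the SDK:

	ctx := aws.BackgroundContext()
	err := svc.WaitUntilBucketExistsWithContext(ctx,
		&s3.HeadBucketInput{Bucket: aws.String("my-bucket")}, // hypothetical bucket
		request.WithWaiterMaxAttempts(12),
		request.WithWaiterDelay(request.ConstantWaiterDelay(5*time.Second)),
	)
	if err != nil {
		// A WaiterResourceNotReadyErrorCode error means the attempts were
		// exhausted before the bucket reached the target state.
		log.Fatal(err)
	}
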
+func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err error) (bool, error) { + result := false + var vals []interface{} + + switch a.Matcher { + case PathAllWaiterMatch, PathWaiterMatch: + // Require all matches to be equal for result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + if len(vals) == 0 { + break + } + result = true + for _, val := range vals { + if !awsutil.DeepEqual(val, a.Expected) { + result = false + break + } + } + case PathAnyWaiterMatch: + // Only a single match needs to equal for the result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + for _, val := range vals { + if awsutil.DeepEqual(val, a.Expected) { + result = true + break + } + } + case PathListWaiterMatch: + // ignored matcher + case StatusWaiterMatch: + s := a.Expected.(int) + result = s == req.HTTPResponse.StatusCode + case ErrorWaiterMatch: + if aerr, ok := err.(awserr.Error); ok { + result = aerr.Code() == a.Expected.(string) + } + default: + waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s", + name, a.Matcher) + } + + if !result { + // If there was no matching result found there is nothing more to do + // for this response, retry the request. + return false, nil + } + + switch a.State { + case SuccessWaiterState: + // waiter completed + return true, nil + case FailureWaiterState: + // Waiter failure state triggered + return true, awserr.New(WaiterResourceNotReadyErrorCode, + "failed waiting for successful resource state", err) + case RetryWaiterState: + // clear the error and retry the operation + return false, nil + default: + waiterLogf(l, "WARNING: Waiter %s encountered unexpected state: %s", + name, a.State) + return false, nil + } +} + +func waiterLogf(logger aws.Logger, msg string, args ...interface{}) { + if logger != nil { + logger.Log(fmt.Sprintf(msg, args...)) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go index d3dc8404..ea7b886f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go @@ -23,7 +23,7 @@ additional config if the AWS_SDK_LOAD_CONFIG environment variable is set. Alternatively you can explicitly create a Session with shared config enabled. To do this you can use NewSessionWithOptions to configure how the Session will be created. Using the NewSessionWithOptions with SharedConfigState set to -SharedConfigEnabled will create the session as if the AWS_SDK_LOAD_CONFIG +SharedConfigEnable will create the session as if the AWS_SDK_LOAD_CONFIG environment variable was set. Creating Sessions @@ -45,16 +45,16 @@ region, and profile loaded from the environment and shared config automatically. Requires the AWS_PROFILE to be set, or "default" is used. // Create Session - sess, err := session.NewSession() + sess := session.Must(session.NewSession()) // Create a Session with a custom region - sess, err := session.NewSession(&aws.Config{Region: aws.String("us-east-1")}) + sess := session.Must(session.NewSession(&aws.Config{ + Region: aws.String("us-east-1"), + })) // Create a S3 client instance from a session - sess, err := session.NewSession() - if err != nil { - // Handle Session creation error - } + sess := session.Must(session.NewSession()) + svc := s3.New(sess) Create Session With Option Overrides @@ -67,23 +67,25 @@ Use NewSessionWithOptions when you want to provide the config profile, or override the shared config state (AWS_SDK_LOAD_CONFIG). 
 	// Equivalent to session.NewSession()
-	sess, err := session.NewSessionWithOptions(session.Options{})
+	sess := session.Must(session.NewSessionWithOptions(session.Options{
+		// Options
+	}))
 
 	// Specify profile to load for the session's config
-	sess, err := session.NewSessionWithOptions(session.Options{
+	sess := session.Must(session.NewSessionWithOptions(session.Options{
 		Profile: "profile_name",
-	})
+	}))
 
 	// Specify profile for config and region for requests
-	sess, err := session.NewSessionWithOptions(session.Options{
+	sess := session.Must(session.NewSessionWithOptions(session.Options{
 		Config:  aws.Config{Region: aws.String("us-east-1")},
 		Profile: "profile_name",
-	})
+	}))
 
 	// Force enable Shared Config support
-	sess, err := session.NewSessionWithOptions(session.Options{
-		SharedConfigState: SharedConfigEnable,
-	})
+	sess := session.Must(session.NewSessionWithOptions(session.Options{
+		SharedConfigState: session.SharedConfigEnable,
+	}))
 
 Adding Handlers
 
@@ -93,7 +95,8 @@ handler logs every request and its payload made by a service client:
 
 	// Create a session, and add additional handlers for all service
 	// clients created with the Session to inherit. Adds logging handler.
-	sess, err := session.NewSession()
+	sess := session.Must(session.NewSession())
+
 	sess.Handlers.Send.PushFront(func(r *request.Request) {
 		// Log every request made and its payload
 		logger.Println("Request: %s/%s, Payload: %s",
@@ -121,9 +124,8 @@ file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both
 files have the same format.
 
 If both config files are present the configuration from both files will be
-read. The Session will be created from configuration values from the shared
-credentials file (~/.aws/credentials) over those in the shared credentials
-file (~/.aws/config).
+read. The Session will be created from configuration values from the shared
+credentials file (~/.aws/credentials) over those in the shared config file (~/.aws/config).
 
 Credentials are the values the SDK should use for authenticating requests with
 AWS Services. Credentials from a configuration file will need to include both
@@ -138,15 +140,14 @@ the other two fields are also provided.
 
 Assume Role values allow you to configure the SDK to assume an IAM role using
 a set of credentials provided in a config file via the source_profile field.
-Both "role_arn" and "source_profile" are required. The SDK does not support
-assuming a role with MFA token Via the Session's constructor. You can use the
-stscreds.AssumeRoleProvider credentials provider to specify custom
-configuration and support for MFA.
+Both "role_arn" and "source_profile" are required. The SDK supports assuming
+a role with an MFA token if the session option AssumeRoleTokenProvider
+is set.
 
 	role_arn = arn:aws:iam::<account_number>:role/<role_name>
 	source_profile = profile_with_creds
 	external_id = 1234
-	mfa_serial = not supported!
+	mfa_serial = <serial or mfa arn>
 	role_session_name = session_name
 
 Region is the region the SDK should use for looking up AWS service endpoints
 and signing requests.
 
 	region = us-east-1
 
+Assume Role with MFA token
+
+To create a session with support for assuming an IAM role with MFA set the
+session option AssumeRoleTokenProvider to a function that will prompt for the
+MFA token code when the SDK assumes the role and refreshes the role's credentials.
+This allows you to configure the SDK via the shared config to assume a role
+with MFA tokens.
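
As one possible shape for that token provider, a sketch where fetchTOTPCode is
a hypothetical helper supplied by the application:

	sess := session.Must(session.NewSessionWithOptions(session.Options{
		SharedConfigState: session.SharedConfigEnable,
		AssumeRoleTokenProvider: func() (string, error) {
			// fetchTOTPCode is hypothetical; it returns the current MFA code.
			return fetchTOTPCode()
		},
	}))
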
+
+In order for the SDK to assume a role with MFA the SharedConfigState
+session option must be set to SharedConfigEnable, or the AWS_SDK_LOAD_CONFIG
+environment variable must be set.
+
+The shared configuration instructs the SDK to assume an IAM role with MFA
+when the mfa_serial configuration field is set in the shared config
+(~/.aws/config) or shared credentials (~/.aws/credentials) file.
+
+If mfa_serial is set in the configuration but the AssumeRoleTokenProvider
+session option is not set, an error will be returned when creating the
+session.
+
+	sess := session.Must(session.NewSessionWithOptions(session.Options{
+		AssumeRoleTokenProvider: stscreds.StdinTokenProvider,
+	}))
+
+	// Create service client value configured for credentials
+	// from assumed role.
+	svc := s3.New(sess)
+
+To set up assume role outside of a session see the stscreds.AssumeRoleProvider
+documentation.
+
 Environment Variables
 
 When a Session is created several environment variables can be set to adjust
@@ -218,6 +250,24 @@ $HOME/.aws/config on Linux/Unix based systems, and
 
 	AWS_CONFIG_FILE=$HOME/my_shared_config
 
+Path to a custom Certificate Authority (CA) bundle PEM file that the SDK
+will use instead of the default system's root CA bundle. Use this only
+if you want to replace the CA bundle the SDK uses for TLS requests.
+
+	AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
+
+Enabling this option will attempt to merge the Transport into the SDK's HTTP
+client. If the client's Transport is not a http.Transport an error will be
+returned. If the Transport's TLS config is set this option will cause the SDK
+to overwrite the Transport's TLS config's RootCAs value. If the CA bundle file
+contains multiple certificates all of them will be loaded.
+
+The Session option CustomCABundle is also available when creating sessions
+to enable this feature. The CustomCABundle session option field has priority
+over the AWS_CA_BUNDLE environment variable, and will be used if both are set.
+
+Setting a custom HTTPClient in the aws.Config options will override this setting.
+To use this option and custom HTTP client, the HTTP client needs to be provided
+when creating the session, not the service client.
 */
 package session
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
index d2f0c844..f1adcf48 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
@@ -2,12 +2,14 @@ package session
 
 import (
 	"os"
-	"path/filepath"
 	"strconv"
 
 	"github.com/aws/aws-sdk-go/aws/credentials"
 )
 
+// EnvProviderName provides a name of the provider when config is loaded from environment.
+const EnvProviderName = "EnvConfigCredentials"
+
 // envConfig is a collection of environment values the SDK will read
 // setup config from. All environment values are optional. But some values
 // such as credentials require multiple values to be complete or the values
@@ -75,6 +77,24 @@ type envConfig struct {
 	//
 	//	AWS_CONFIG_FILE=$HOME/my_shared_config
 	SharedConfigFile string
+
+	// Sets the path to a custom Certificate Authority (CA) Bundle PEM file
+	// that the SDK will use instead of the system's root CA bundle.
+	// Only use this if you want to configure the SDK to use a custom set
+	// of CAs.
+	//
+	// Enabling this option will attempt to merge the Transport
+	// into the SDK's HTTP client. If the client's Transport is
+	// not a http.Transport an error will be returned.
If the + // Transport's TLS config is set this option will cause the + // SDK to overwrite the Transport's TLS config's RootCAs value. + // + // Setting a custom HTTPClient in the aws.Config options will override this setting. + // To use this option and custom HTTP client, the HTTP client needs to be provided + // when creating the session. Not the service client. + // + // AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle + CustomCABundle string } var ( @@ -98,6 +118,12 @@ var ( "AWS_PROFILE", "AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set } + sharedCredsFileEnvKey = []string{ + "AWS_SHARED_CREDENTIALS_FILE", + } + sharedConfigFileEnvKey = []string{ + "AWS_CONFIG_FILE", + } ) // loadEnvConfig retrieves the SDK's environment configuration. @@ -134,7 +160,7 @@ func envConfigLoad(enableSharedConfig bool) envConfig { if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 { cfg.Creds = credentials.Value{} } else { - cfg.Creds.ProviderName = "EnvConfigCredentials" + cfg.Creds.ProviderName = EnvProviderName } regionKeys := regionEnvKeys @@ -147,8 +173,10 @@ func envConfigLoad(enableSharedConfig bool) envConfig { setFromEnvVal(&cfg.Region, regionKeys) setFromEnvVal(&cfg.Profile, profileKeys) - cfg.SharedCredentialsFile = sharedCredentialsFilename() - cfg.SharedConfigFile = sharedConfigFilename() + setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey) + setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey) + + cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE") return cfg } @@ -161,28 +189,3 @@ func setFromEnvVal(dst *string, keys []string) { } } } - -func sharedCredentialsFilename() string { - if name := os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(name) > 0 { - return name - } - - return filepath.Join(userHomeDir(), ".aws", "credentials") -} - -func sharedConfigFilename() string { - if name := os.Getenv("AWS_CONFIG_FILE"); len(name) > 0 { - return name - } - - return filepath.Join(userHomeDir(), ".aws", "config") -} - -func userHomeDir() string { - homeDir := os.Getenv("HOME") // *nix - if len(homeDir) == 0 { // windows - homeDir = os.Getenv("USERPROFILE") - } - - return homeDir -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go index c9427e91..9f75d5ac 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -1,7 +1,13 @@ package session import ( + "crypto/tls" + "crypto/x509" "fmt" + "io" + "io/ioutil" + "net/http" + "os" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" @@ -40,7 +46,7 @@ type Session struct { // // If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value // the shared config file (~/.aws/config) will also be loaded, in addition to -// the shared credentials file (~/.aws/config). Values set in both the +// the shared credentials file (~/.aws/credentials). Values set in both the // shared config, and shared credentials will be taken from the shared // credentials file. // @@ -52,7 +58,7 @@ func New(cfgs ...*aws.Config) *Session { envCfg := loadEnvConfig() if envCfg.EnableSharedConfig { - s, err := newSession(envCfg, cfgs...) + s, err := newSession(Options{}, envCfg, cfgs...) if err != nil { // Old session.New expected all errors to be discovered when // a request is made, and would report the errors then. This @@ -73,7 +79,7 @@ func New(cfgs ...*aws.Config) *Session { return s } - return oldNewSession(cfgs...) 
+ return deprecatedNewSession(cfgs...) } // NewSession returns a new Session created from SDK defaults, config files, @@ -83,7 +89,7 @@ func New(cfgs ...*aws.Config) *Session { // // If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value // the shared config file (~/.aws/config) will also be loaded in addition to -// the shared credentials file (~/.aws/config). Values set in both the +// the shared credentials file (~/.aws/credentials). Values set in both the // shared config, and shared credentials will be taken from the shared // credentials file. Enabling the Shared Config will also allow the Session // to be built with retrieving credentials with AssumeRole set in the config. @@ -92,9 +98,10 @@ func New(cfgs ...*aws.Config) *Session { // control through code how the Session will be created. Such as specifying the // config profile, and controlling if shared config is enabled or not. func NewSession(cfgs ...*aws.Config) (*Session, error) { - envCfg := loadEnvConfig() + opts := Options{} + opts.Config.MergeIn(cfgs...) - return newSession(envCfg, cfgs...) + return NewSessionWithOptions(opts) } // SharedConfigState provides the ability to optionally override the state @@ -147,6 +154,45 @@ type Options struct { // will allow you to override the AWS_SDK_LOAD_CONFIG environment variable // and enable or disable the shared config functionality. SharedConfigState SharedConfigState + + // Ordered list of files the session will load configuration from. + // It will override environment variable AWS_SHARED_CREDENTIALS_FILE, AWS_CONFIG_FILE. + SharedConfigFiles []string + + // When the SDK's shared config is configured to assume a role with MFA + // this option is required in order to provide the mechanism that will + // retrieve the MFA token. There is no default value for this field. If + // it is not set an error will be returned when creating the session. + // + // This token provider will be called when ever the assumed role's + // credentials need to be refreshed. Within the context of service clients + // all sharing the same session the SDK will ensure calls to the token + // provider are atomic. When sharing a token provider across multiple + // sessions additional synchronization logic is needed to ensure the + // token providers do not introduce race conditions. It is recommend to + // share the session where possible. + // + // stscreds.StdinTokenProvider is a basic implementation that will prompt + // from stdin for the MFA token code. + // + // This field is only used if the shared configuration is enabled, and + // the config enables assume role wit MFA via the mfa_serial field. + AssumeRoleTokenProvider func() (string, error) + + // Reader for a custom Credentials Authority (CA) bundle in PEM format that + // the SDK will use instead of the default system's root CA bundle. Use this + // only if you want to replace the CA bundle the SDK uses for TLS requests. + // + // Enabling this option will attempt to merge the Transport into the SDK's HTTP + // client. If the client's Transport is not a http.Transport an error will be + // returned. If the Transport's TLS config is set this option will cause the SDK + // to overwrite the Transport's TLS config's RootCAs value. If the CA + // bundle reader contains multiple certificates all of them will be loaded. + // + // The Session option CustomCABundle is also available when creating sessions + // to also enable this feature. 
CustomCABundle session option field has priority + // over the AWS_CA_BUNDLE environment variable, and will be used if both are set. + CustomCABundle io.Reader } // NewSessionWithOptions returns a new Session created from SDK defaults, config files, @@ -155,29 +201,29 @@ type Options struct { // // If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value // the shared config file (~/.aws/config) will also be loaded in addition to -// the shared credentials file (~/.aws/config). Values set in both the +// the shared credentials file (~/.aws/credentials). Values set in both the // shared config, and shared credentials will be taken from the shared // credentials file. Enabling the Shared Config will also allow the Session // to be built with retrieving credentials with AssumeRole set in the config. // // // Equivalent to session.New -// sess, err := session.NewSessionWithOptions(session.Options{}) +// sess := session.Must(session.NewSessionWithOptions(session.Options{})) // // // Specify profile to load for the session's config -// sess, err := session.NewSessionWithOptions(session.Options{ +// sess := session.Must(session.NewSessionWithOptions(session.Options{ // Profile: "profile_name", -// }) +// })) // // // Specify profile for config and region for requests -// sess, err := session.NewSessionWithOptions(session.Options{ +// sess := session.Must(session.NewSessionWithOptions(session.Options{ // Config: aws.Config{Region: aws.String("us-east-1")}, // Profile: "profile_name", -// }) +// })) // // // Force enable Shared Config support -// sess, err := session.NewSessionWithOptions(session.Options{ -// SharedConfigState: SharedConfigEnable, -// }) +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// SharedConfigState: session.SharedConfigEnable, +// })) func NewSessionWithOptions(opts Options) (*Session, error) { var envCfg envConfig if opts.SharedConfigState == SharedConfigEnable { @@ -197,7 +243,25 @@ func NewSessionWithOptions(opts Options) (*Session, error) { envCfg.EnableSharedConfig = true } - return newSession(envCfg, &opts.Config) + if len(envCfg.SharedCredentialsFile) == 0 { + envCfg.SharedCredentialsFile = defaults.SharedCredentialsFilename() + } + if len(envCfg.SharedConfigFile) == 0 { + envCfg.SharedConfigFile = defaults.SharedConfigFilename() + } + + // Only use AWS_CA_BUNDLE if session option is not provided. + if len(envCfg.CustomCABundle) != 0 && opts.CustomCABundle == nil { + f, err := os.Open(envCfg.CustomCABundle) + if err != nil { + return nil, awserr.New("LoadCustomCABundleError", + "failed to open custom CA bundle PEM file", err) + } + defer f.Close() + opts.CustomCABundle = f + } + + return newSession(opts, envCfg, &opts.Config) } // Must is a helper function to ensure the Session is valid and there was no @@ -215,7 +279,7 @@ func Must(sess *Session, err error) *Session { return sess } -func oldNewSession(cfgs ...*aws.Config) *Session { +func deprecatedNewSession(cfgs ...*aws.Config) *Session { cfg := defaults.Config() handlers := defaults.Handlers() @@ -242,7 +306,7 @@ func oldNewSession(cfgs ...*aws.Config) *Session { return s } -func newSession(envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { +func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { cfg := defaults.Config() handlers := defaults.Handlers() @@ -251,13 +315,18 @@ func newSession(envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { userCfg := &aws.Config{} userCfg.MergeIn(cfgs...) 
- // Order config files will be loaded in with later files overwriting + // Ordered config files will be loaded in with later files overwriting // previous config file values. - cfgFiles := []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile} - if !envCfg.EnableSharedConfig { - // The shared config file (~/.aws/config) is only loaded if instructed - // to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG). - cfgFiles = cfgFiles[1:] + var cfgFiles []string + if opts.SharedConfigFiles != nil { + cfgFiles = opts.SharedConfigFiles + } else { + cfgFiles = []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile} + if !envCfg.EnableSharedConfig { + // The shared config file (~/.aws/config) is only loaded if instructed + // to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG). + cfgFiles = cfgFiles[1:] + } } // Load additional config from file(s) @@ -266,7 +335,9 @@ func newSession(envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { return nil, err } - mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers) + if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil { + return nil, err + } s := &Session{ Config: cfg, @@ -275,10 +346,62 @@ func newSession(envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { initHandlers(s) + // Setup HTTP client with custom cert bundle if enabled + if opts.CustomCABundle != nil { + if err := loadCustomCABundle(s, opts.CustomCABundle); err != nil { + return nil, err + } + } + return s, nil } -func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers) { +func loadCustomCABundle(s *Session, bundle io.Reader) error { + var t *http.Transport + switch v := s.Config.HTTPClient.Transport.(type) { + case *http.Transport: + t = v + default: + if s.Config.HTTPClient.Transport != nil { + return awserr.New("LoadCustomCABundleError", + "unable to load custom CA bundle, HTTPClient's transport unsupported type", nil) + } + } + if t == nil { + t = &http.Transport{} + } + + p, err := loadCertPool(bundle) + if err != nil { + return err + } + if t.TLSClientConfig == nil { + t.TLSClientConfig = &tls.Config{} + } + t.TLSClientConfig.RootCAs = p + + s.Config.HTTPClient.Transport = t + + return nil +} + +func loadCertPool(r io.Reader) (*x509.CertPool, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, awserr.New("LoadCustomCABundleError", + "failed to read custom CA bundle PEM file", err) + } + + p := x509.NewCertPool() + if !p.AppendCertsFromPEM(b) { + return nil, awserr.New("LoadCustomCABundleError", + "failed to load custom CA bundle PEM file", err) + } + + return p, nil +} + +func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers, sessOpts Options) error { // Merge in user provided configuration cfg.MergeIn(userCfg) @@ -302,6 +425,11 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg share cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds( sharedCfg.AssumeRoleSource.Creds, ) + if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil { + // AssumeRole Token provider is required if doing Assume Role + // with MFA. 
+ return AssumeRoleTokenProviderNotSetError{} + } cfg.Credentials = stscreds.NewCredentials( &Session{ Config: &cfgCp, @@ -311,11 +439,16 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg share func(opt *stscreds.AssumeRoleProvider) { opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName + // Assume role with external ID if len(sharedCfg.AssumeRole.ExternalID) > 0 { opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID) } - // MFA not supported + // Assume role with MFA + if len(sharedCfg.AssumeRole.MFASerial) > 0 { + opt.SerialNumber = aws.String(sharedCfg.AssumeRole.MFASerial) + opt.TokenProvider = sessOpts.AssumeRoleTokenProvider + } }, ) } else if len(sharedCfg.Creds.AccessKeyID) > 0 { @@ -336,6 +469,33 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg share }) } } + + return nil +} + +// AssumeRoleTokenProviderNotSetError is an error returned when creating a session when the +// MFAToken option is not set when shared config is configured load assume a +// role with an MFA token. +type AssumeRoleTokenProviderNotSetError struct{} + +// Code is the short id of the error. +func (e AssumeRoleTokenProviderNotSetError) Code() string { + return "AssumeRoleTokenProviderNotSetError" +} + +// Message is the description of the error +func (e AssumeRoleTokenProviderNotSetError) Message() string { + return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.") +} + +// OrigErr is the underlying error that caused the failure. +func (e AssumeRoleTokenProviderNotSetError) OrigErr() error { + return nil +} + +// Error satisfies the error interface. +func (e AssumeRoleTokenProviderNotSetError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", nil) } type credProviderError struct { @@ -404,6 +564,10 @@ func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) ( func(opt *endpoints.Options) { opt.DisableSSL = aws.BoolValue(s.Config.DisableSSL) opt.UseDualStack = aws.BoolValue(s.Config.UseDualStack) + + // Support the condition where the service is modeled but its + // endpoint metadata is not available. + opt.ResolveUnknownService = true }, ) } @@ -416,3 +580,27 @@ func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) ( SigningName: resolved.SigningName, }, err } + +// ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception +// that the EndpointResolver will not be used to resolve the endpoint. The only +// endpoint set must come from the aws.Config.Endpoint field. +func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Config { + s = s.Copy(cfgs...) 
+ + var resolved endpoints.ResolvedEndpoint + + region := aws.StringValue(s.Config.Region) + + if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 { + resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL)) + resolved.SigningRegion = region + } + + return client.Config{ + Config: s.Config, + Handlers: s.Handlers, + Endpoint: resolved.URL, + SigningRegion: resolved.SigningRegion, + SigningName: resolved.SigningName, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go index b58076f5..09c8e5bc 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -113,7 +113,7 @@ func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) { f, err := ini.Load(b) if err != nil { - return nil, SharedConfigLoadError{Filename: filename} + return nil, SharedConfigLoadError{Filename: filename, Err: err} } files = append(files, sharedConfigFile{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go new file mode 100644 index 00000000..6aa2ed24 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go @@ -0,0 +1,7 @@ +package v4 + +// WithUnsignedPayload will enable and set the UnsignedPayload field to +// true of the signer. +func WithUnsignedPayload(v4 *Signer) { + v4.UnsignedPayload = true +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go index 98bfe742..ccc88b4a 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -45,7 +45,7 @@ // If signing a request intended for HTTP2 server, and you're using Go 1.6.2 // through 1.7.4 you should use the URL.RawPath as the pre-escaped form of the // request URL. https://github.com/golang/go/issues/16847 points to a bug in -// Go pre 1.8 that failes to make HTTP2 requests using absolute URL in the HTTP +// Go pre 1.8 that fails to make HTTP2 requests using absolute URL in the HTTP // message. URL.Opaque generally will force Go to make requests with absolute URL. // URL.RawPath does not do this, but RawPath must be a valid escaping of Path // or url.EscapedPath will ignore the RawPath escaping. @@ -55,7 +55,6 @@ package v4 import ( - "bytes" "crypto/hmac" "crypto/sha256" "encoding/hex" @@ -194,6 +193,10 @@ type Signer struct { // This value should only be used for testing. If it is nil the default // time.Now will be used. currentTimeFn func() time.Time + + // UnsignedPayload will prevent signing of the payload. This will only + // work for services that have support for this. + UnsignedPayload bool } // NewSigner returns a Signer pointer configured with the credentials and optional @@ -227,6 +230,7 @@ type signingCtx struct { isPresign bool formattedTime string formattedShortTime string + unsignedPayload bool bodyDigest string signedHeaders string @@ -264,7 +268,7 @@ type signingCtx struct { // "X-Amz-Content-Sha256" header with a precomputed value. The signer will // only compute the hash if the request header value is empty. 
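
A sketch of signing a raw http.Request with this signer, using static
placeholder credentials; the service name and region must match the endpoint
being called:

	signer := v4.NewSigner(credentials.NewStaticCredentials("AKID", "SECRET", ""))
	req, _ := http.NewRequest("GET", "https://sqs.us-east-1.amazonaws.com/", nil)
	_, err := signer.Sign(req, nil, "sqs", "us-east-1", time.Now())
	if err != nil {
		log.Fatal(err)
	}
	// req now carries the Authorization and X-Amz-Date headers and can be
	// sent with any http.Client.
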
func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) {
- return v4.signWithBody(r, body, service, region, 0, signTime)
+ return v4.signWithBody(r, body, service, region, 0, false, signTime)
}

// Presign signs AWS v4 requests with the provided body, service name, region
@@ -298,10 +302,10 @@ func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region strin
// presigned request's signature you can set the "X-Amz-Content-Sha256"
// HTTP header and that will be included in the request's signature.
func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
- return v4.signWithBody(r, body, service, region, exp, signTime)
+ return v4.signWithBody(r, body, service, region, exp, true, signTime)
}

-func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
+func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, isPresign bool, signTime time.Time) (http.Header, error) {
 currentTimeFn := v4.currentTimeFn
 if currentTimeFn == nil {
 currentTimeFn = time.Now
@@ -313,10 +317,11 @@ func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, regi
 Query: r.URL.Query(),
 Time: signTime,
 ExpireTime: exp,
- isPresign: exp != 0,
+ isPresign: isPresign,
 ServiceName: service,
 Region: region,
 DisableURIPathEscaping: v4.DisableURIPathEscaping,
+ unsignedPayload: v4.UnsignedPayload,
 }

 for key := range ctx.Query {
@@ -334,6 +339,7 @@ func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, regi
 return http.Header{}, err
 }

+ ctx.sanitizeHostForHeader()
 ctx.assignAmzQueryValues()
 ctx.build(v4.DisableHeaderHoisting)

@@ -358,6 +364,10 @@ func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, regi
 return ctx.SignedHeaderVals, nil
}

+func (ctx *signingCtx) sanitizeHostForHeader() {
+ request.SanitizeHostForHeader(ctx.Request)
+}
+
func (ctx *signingCtx) handlePresignRemoval() {
 if !ctx.isPresign {
 return
@@ -396,7 +406,7 @@ var SignRequestHandler = request.NamedHandler{
}

// SignSDKRequest signs an AWS request with the V4 signature. This
-// request handler is bested used only with the SDK's built in service client's
+// request handler should only be used with the SDK's built in service client's
// API operation requests.
//
// This function should not be used on its own, but in conjunction with
@@ -409,7 +419,18 @@ var SignRequestHandler = request.NamedHandler{
func SignSDKRequest(req *request.Request) {
 signSDKRequestWithCurrTime(req, time.Now)
}
-func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time) {
+
+// BuildNamedHandler will build a generic handler for signing.
+func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler {
+ return request.NamedHandler{
+ Name: name,
+ Fn: func(req *request.Request) {
+ signSDKRequestWithCurrTime(req, time.Now, opts...)
+ },
+ }
+}
+
+func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) {
 // If the request does not need to be signed ignore the signing of the
 // request if the AnonymousCredentials object is used.
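BuildNamedHandler makes it possible to construct a signing handler with options pre-applied and swap it in for the stock one on a client. A sketch, assuming the handler list's SwapNamed helper available in this SDK vintage and an S3 client:

```
package main

import (
	"github.com/aws/aws-sdk-go/aws/session"
	v4 "github.com/aws/aws-sdk-go/aws/signer/v4"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// Build a replacement signing handler with options applied, then swap
	// it in for the default v4 handler by name.
	svc.Handlers.Sign.SwapNamed(
		v4.BuildNamedHandler(v4.SignRequestHandler.Name, v4.WithUnsignedPayload),
	)
	_ = svc
}
```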
 if req.Config.Credentials == credentials.AnonymousCredentials {
@@ -441,13 +462,17 @@ func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time
 v4.DisableRequestBodyOverwrite = true
 })

+ for _, opt := range opts {
+ opt(v4)
+ }
+
 signingTime := req.Time
 if !req.LastSignedAt.IsZero() {
 signingTime = req.LastSignedAt
 }

 signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(),
- name, region, req.ExpireTime, signingTime,
+ name, region, req.ExpireTime, req.ExpireTime > 0, signingTime,
 )
 if err != nil {
 req.Error = err
@@ -482,6 +507,8 @@ func (ctx *signingCtx) build(disableHeaderHoisting bool) {
 ctx.buildTime() // no depends
 ctx.buildCredentialString() // no depends

+ ctx.buildBodyDigest()
+
 unsignedHeaders := ctx.Request.Header
 if ctx.isPresign {
 if !disableHeaderHoisting {
@@ -493,7 +520,6 @@ func (ctx *signingCtx) build(disableHeaderHoisting bool) {
 }
 }

- ctx.buildBodyDigest()
 ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders)
 ctx.buildCanonicalString() // depends on canon headers / signed headers
 ctx.buildStringToSign() // depends on canon string
@@ -583,14 +609,18 @@ func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) {
 headerValues := make([]string, len(headers))
 for i, k := range headers {
 if k == "host" {
- headerValues[i] = "host:" + ctx.Request.URL.Host
+ if ctx.Request.Host != "" {
+ headerValues[i] = "host:" + ctx.Request.Host
+ } else {
+ headerValues[i] = "host:" + ctx.Request.URL.Host
+ }
 } else {
 headerValues[i] = k + ":" + strings.Join(ctx.SignedHeaderVals[k], ",")
 }
 }
-
- ctx.canonicalHeaders = strings.Join(stripExcessSpaces(headerValues), "\n")
+ stripExcessSpaces(headerValues)
+ ctx.canonicalHeaders = strings.Join(headerValues, "\n")
}

func (ctx *signingCtx) buildCanonicalString() {
@@ -634,14 +664,14 @@ func (ctx *signingCtx) buildSignature() {
func (ctx *signingCtx) buildBodyDigest() {
 hash := ctx.Request.Header.Get("X-Amz-Content-Sha256")
 if hash == "" {
- if ctx.isPresign && ctx.ServiceName == "s3" {
+ if ctx.unsignedPayload || (ctx.isPresign && ctx.ServiceName == "s3") {
 hash = "UNSIGNED-PAYLOAD"
 } else if ctx.Body == nil {
 hash = emptyStringSHA256
 } else {
 hash = hex.EncodeToString(makeSha256Reader(ctx.Body))
 }
- if ctx.ServiceName == "s3" || ctx.ServiceName == "glacier" {
+ if ctx.unsignedPayload || ctx.ServiceName == "s3" || ctx.ServiceName == "glacier" {
 ctx.Request.Header.Set("X-Amz-Content-Sha256", hash)
 }
 }
@@ -692,49 +722,46 @@ func makeSha256Reader(reader io.ReadSeeker) []byte {
 return hash.Sum(nil)
}

-const doubleSpaces = "  "
+const doubleSpace = "  "

-var doubleSpaceBytes = []byte(doubleSpaces)
+// stripExcessSpaces will rewrite the passed in slice's string values to not
+// contain multiple side-by-side spaces.
+func stripExcessSpaces(vals []string) {
+ var j, k, l, m, spaces int
+ for i, str := range vals {
+ // Trim trailing spaces
+ for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- {
+ }
-
-func stripExcessSpaces(headerVals []string) []string {
- vals := make([]string, len(headerVals))
- for i, str := range headerVals {
- // Trim leading and trailing spaces
- trimmed := strings.TrimSpace(str)
+ // Trim leading spaces
+ for k = 0; k < j && str[k] == ' '; k++ {
+ }
+ str = str[k : j+1]

- idx := strings.Index(trimmed, doubleSpaces)
- var buf []byte
- for idx > -1 {
- // Multiple adjacent spaces found
- if buf == nil {
- // first time create the buffer
- buf = []byte(trimmed)
- }
+ // Strip multiple spaces.
+ j = strings.Index(str, doubleSpace) + if j < 0 { + vals[i] = str + continue + } - stripToIdx := -1 - for j := idx + 1; j < len(buf); j++ { - if buf[j] != ' ' { - buf = append(buf[:idx+1], buf[j:]...) - stripToIdx = j - break - } - } - - if stripToIdx >= 0 { - idx = bytes.Index(buf[stripToIdx:], doubleSpaceBytes) - if idx >= 0 { - idx += stripToIdx + buf := []byte(str) + for k, m, l = j, j, len(buf); k < l; k++ { + if buf[k] == ' ' { + if spaces == 0 { + // First space. + buf[m] = buf[k] + m++ } + spaces++ } else { - idx = -1 + // End of multiple spaces. + spaces = 0 + buf[m] = buf[k] + m++ } } - if buf != nil { - vals[i] = string(buf) - } else { - vals[i] = trimmed - } + vals[i] = string(buf[:m]) } - return vals } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go index 9ca685e9..0e2d864e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/types.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go @@ -114,5 +114,5 @@ func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) { func (b *WriteAtBuffer) Bytes() []byte { b.m.Lock() defer b.m.Unlock() - return b.buf[:len(b.buf):len(b.buf)] + return b.buf } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url.go b/vendor/github.com/aws/aws-sdk-go/aws/url.go new file mode 100644 index 00000000..6192b245 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/url.go @@ -0,0 +1,12 @@ +// +build go1.8 + +package aws + +import "net/url" + +// URLHostname will extract the Hostname without port from the URL value. +// +// Wrapper of net/url#URL.Hostname for backwards Go version compatibility. +func URLHostname(url *url.URL) string { + return url.Hostname() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go new file mode 100644 index 00000000..0210d272 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go @@ -0,0 +1,29 @@ +// +build !go1.8 + +package aws + +import ( + "net/url" + "strings" +) + +// URLHostname will extract the Hostname without port from the URL value. +// +// Copy of Go 1.8's net/url#URL.Hostname functionality. +func URLHostname(url *url.URL) string { + return stripPort(url.Host) + +} + +// stripPort is copy of Go 1.8 url#URL.Hostname functionality. +// https://golang.org/src/net/url/url.go +func stripPort(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return hostport + } + if i := strings.IndexByte(hostport, ']'); i != -1 { + return strings.TrimPrefix(hostport[:i], "[") + } + return hostport[:colon] +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index fb5d06a1..5c265d57 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.6.12" +const SDKVersion = "1.12.36" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go new file mode 100644 index 00000000..ebcbc2b4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go @@ -0,0 +1,40 @@ +package shareddefaults + +import ( + "os" + "path/filepath" + "runtime" +) + +// SharedCredentialsFilename returns the SDK's default file path +// for the shared credentials file. 
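The rewritten stripExcessSpaces that closes above mutates the slice in place instead of allocating a new one per call. As a rough illustration of the normalization it performs (not the vendored algorithm itself), the sketch below uses strings.Fields, which additionally treats tabs and newlines as separators where the real code only handles spaces:

```
package main

import (
	"fmt"
	"strings"
)

// collapseSpaces approximates the vendored stripExcessSpaces: trim the ends
// and collapse interior runs of spaces to a single space. strings.Fields
// also splits on tabs and newlines, which the real code does not.
func collapseSpaces(vals []string) {
	for i, s := range vals {
		vals[i] = strings.Join(strings.Fields(s), " ")
	}
}

func main() {
	headerValues := []string{"  a   b  ", "c  d"}
	collapseSpaces(headerValues)
	fmt.Printf("%q\n", headerValues) // ["a b" "c d"]
}
```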
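The paired url.go/url_1_7.go files above select between Go 1.8's URL.Hostname method and a backported copy via build tags; the call site is the same either way:

```
package main

import (
	"fmt"
	"net/url"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	u, err := url.Parse("https://s3.us-west-2.amazonaws.com:443/bucket/key")
	if err != nil {
		panic(err)
	}
	// Hostname without the port, regardless of Go version.
	fmt.Println(aws.URLHostname(u)) // s3.us-west-2.amazonaws.com
}
```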
+//
+// Builds the shared credentials file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/credentials
+// - Windows: %USERPROFILE%\.aws\credentials
+func SharedCredentialsFilename() string {
+ return filepath.Join(UserHomeDir(), ".aws", "credentials")
+}
+
+// SharedConfigFilename returns the SDK's default file path for
+// the shared config file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/config
+// - Windows: %USERPROFILE%\.aws\config
+func SharedConfigFilename() string {
+ return filepath.Join(UserHomeDir(), ".aws", "config")
+}
+
+// UserHomeDir returns the home directory for the user the process is
+// running under.
+func UserHomeDir() string {
+ if runtime.GOOS == "windows" { // Windows
+ return os.Getenv("USERPROFILE")
+ }
+
+ // *nix
+ return os.Getenv("HOME")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go
new file mode 100644
index 00000000..776d1101
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go
@@ -0,0 +1,76 @@
+package protocol
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "strconv"
+
+ "github.com/aws/aws-sdk-go/aws"
+)
+
+// EscapeMode is the mode that should be used for escaping a value
+type EscapeMode uint
+
+// The modes for escaping a value before it is marshaled, and unmarshaled.
+const (
+ NoEscape EscapeMode = iota
+ Base64Escape
+ QuotedEscape
+)
+
+// EncodeJSONValue marshals the value into a JSON string, and optionally base64
+// encodes the string before returning it.
+//
+// Will panic if the escape mode is unknown.
+func EncodeJSONValue(v aws.JSONValue, escape EscapeMode) (string, error) {
+ b, err := json.Marshal(v)
+ if err != nil {
+ return "", err
+ }
+
+ switch escape {
+ case NoEscape:
+ return string(b), nil
+ case Base64Escape:
+ return base64.StdEncoding.EncodeToString(b), nil
+ case QuotedEscape:
+ return strconv.Quote(string(b)), nil
+ }
+
+ panic(fmt.Sprintf("EncodeJSONValue called with unknown EscapeMode, %v", escape))
+}
+
+// DecodeJSONValue will attempt to decode the string input as a JSONValue.
+// Optionally base64 decodes the value first, before JSON unmarshaling.
+//
+// Will panic if the escape mode is unknown.
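EncodeJSONValue and DecodeJSONValue round-trip an aws.JSONValue through one of the three escape modes. A sketch of the round trip (note private/protocol is an SDK-internal package, so this is for illustration rather than application code); Base64Escape is the mode the REST marshalers later in this patch use for headers:

```
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/private/protocol"
)

func main() {
	in := aws.JSONValue{"feature": "jsonvalue", "count": 2}

	encoded, err := protocol.EncodeJSONValue(in, protocol.Base64Escape)
	if err != nil {
		panic(err)
	}

	out, err := protocol.DecodeJSONValue(encoded, protocol.Base64Escape)
	if err != nil {
		panic(err)
	}

	// JSON numbers decode as float64.
	fmt.Println(out["feature"], out["count"]) // jsonvalue 2
}
```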
+func DecodeJSONValue(v string, escape EscapeMode) (aws.JSONValue, error) { + var b []byte + var err error + + switch escape { + case NoEscape: + b = []byte(v) + case Base64Escape: + b, err = base64.StdEncoding.DecodeString(v) + case QuotedEscape: + var u string + u, err = strconv.Unquote(v) + b = []byte(u) + default: + panic(fmt.Sprintf("DecodeJSONValue called with unknown EscapeMode, %v", escape)) + } + + if err != nil { + return nil, err + } + + m := aws.JSONValue{} + err = json.Unmarshal(b, &m) + if err != nil { + return nil, err + } + + return m, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go index f434ab7c..5ce9cba3 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go @@ -80,7 +80,6 @@ func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix stri continue } - if protocol.CanSetIdempotencyToken(value.Field(i), field) { token := protocol.GetIdempotencyToken() elemValue = reflect.ValueOf(token) @@ -122,9 +121,17 @@ func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string return nil } + if _, ok := value.Interface().([]byte); ok { + return q.parseScalar(v, value, prefix, tag) + } + // check for unflattened list member if !q.isEC2 && tag.Get("flattened") == "" { - prefix += ".member" + if listName := tag.Get("locationNameList"); listName == "" { + prefix += ".member" + } else { + prefix += "." + listName + } } for i := 0; i < value.Len(); i++ { diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go index 20a41d46..c405288d 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go @@ -17,6 +17,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" ) // RFC822 returns an RFC822 formatted timestamp for AWS protocols @@ -82,8 +83,12 @@ func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bo if name == "" { name = field.Name } - if m.Kind() == reflect.Ptr { + if kind := m.Kind(); kind == reflect.Ptr { m = m.Elem() + } else if kind == reflect.Interface { + if !m.Elem().IsValid() { + continue + } } if !m.IsValid() { continue @@ -95,16 +100,16 @@ func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bo var err error switch field.Tag.Get("location") { case "headers": // header maps - err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag.Get("locationName")) + err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag) case "header": - err = buildHeader(&r.HTTPRequest.Header, m, name) + err = buildHeader(&r.HTTPRequest.Header, m, name, field.Tag) case "uri": - err = buildURI(r.HTTPRequest.URL, m, name) + err = buildURI(r.HTTPRequest.URL, m, name, field.Tag) case "querystring": - err = buildQueryString(query, m, name) + err = buildQueryString(query, m, name, field.Tag) default: if buildGETQuery { - err = buildQueryString(query, m, name) + err = buildQueryString(query, m, name, field.Tag) } } r.Error = err @@ -145,8 +150,8 @@ func buildBody(r *request.Request, v reflect.Value) { } } -func buildHeader(header *http.Header, v reflect.Value, name string) error { - str, err 
:= convertType(v) +func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect.StructTag) error { + str, err := convertType(v, tag) if err == errValueNotSet { return nil } else if err != nil { @@ -158,9 +163,10 @@ func buildHeader(header *http.Header, v reflect.Value, name string) error { return nil } -func buildHeaderMap(header *http.Header, v reflect.Value, prefix string) error { +func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) error { + prefix := tag.Get("locationName") for _, key := range v.MapKeys() { - str, err := convertType(v.MapIndex(key)) + str, err := convertType(v.MapIndex(key), tag) if err == errValueNotSet { continue } else if err != nil { @@ -173,8 +179,8 @@ func buildHeaderMap(header *http.Header, v reflect.Value, prefix string) error { return nil } -func buildURI(u *url.URL, v reflect.Value, name string) error { - value, err := convertType(v) +func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) error { + value, err := convertType(v, tag) if err == errValueNotSet { return nil } else if err != nil { @@ -190,7 +196,7 @@ func buildURI(u *url.URL, v reflect.Value, name string) error { return nil } -func buildQueryString(query url.Values, v reflect.Value, name string) error { +func buildQueryString(query url.Values, v reflect.Value, name string, tag reflect.StructTag) error { switch value := v.Interface().(type) { case []*string: for _, item := range value { @@ -207,7 +213,7 @@ func buildQueryString(query url.Values, v reflect.Value, name string) error { } } default: - str, err := convertType(v) + str, err := convertType(v, tag) if err == errValueNotSet { return nil } else if err != nil { @@ -246,13 +252,12 @@ func EscapePath(path string, encodeSep bool) string { return buf.String() } -func convertType(v reflect.Value) (string, error) { +func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) { v = reflect.Indirect(v) if !v.IsValid() { return "", errValueNotSet } - var str string switch value := v.Interface().(type) { case string: str = value @@ -266,8 +271,20 @@ func convertType(v reflect.Value) (string, error) { str = strconv.FormatFloat(value, 'f', -1, 64) case time.Time: str = value.UTC().Format(RFC822) + case aws.JSONValue: + if len(value) == 0 { + return "", errValueNotSet + } + escaping := protocol.NoEscape + if tag.Get("location") == "header" { + escaping = protocol.Base64Escape + } + str, err = protocol.EncodeJSONValue(value, escaping) + if err != nil { + return "", fmt.Errorf("unable to encode JSONValue, %v", err) + } default: - err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + err := fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type()) return "", err } return str, nil diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go index 9c00921c..823f045e 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -12,8 +12,10 @@ import ( "strings" "time" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" ) // UnmarshalHandler is a named request handler for unmarshaling rest protocol requests @@ -111,7 +113,7 @@ func unmarshalLocationElements(r *request.Request, v reflect.Value) { case "statusCode": unmarshalStatusCode(m, 
r.HTTPResponse.StatusCode) case "header": - err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name)) + err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name), field.Tag) if err != nil { r.Error = awserr.New("SerializationError", "failed to decode REST response", err) break @@ -158,8 +160,13 @@ func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) err return nil } -func unmarshalHeader(v reflect.Value, header string) error { - if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { +func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) error { + isJSONValue := tag.Get("type") == "jsonvalue" + if isJSONValue { + if len(header) == 0 { + return nil + } + } else if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { return nil } @@ -196,6 +203,16 @@ func unmarshalHeader(v reflect.Value, header string) error { return err } v.Set(reflect.ValueOf(&t)) + case aws.JSONValue: + escaping := protocol.NoEscape + if tag.Get("location") == "header" { + escaping = protocol.Base64Escape + } + m, err := protocol.DecodeJSONValue(header, escaping) + if err != nil { + return err + } + v.Set(reflect.ValueOf(m)) default: err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) return err diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go index c74c1919..7091b456 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go @@ -131,7 +131,6 @@ func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag refl continue } - mTag := field.Tag if mTag.Get("location") != "" { // skip non-body members continue diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go index 64b6ddd3..87584628 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go @@ -15,7 +15,10 @@ import ( // needs to match the shape of the XML expected to be decoded. // If the shape doesn't match unmarshaling will fail. 
func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { - n, _ := XMLToStruct(d, nil) + n, err := XMLToStruct(d, nil) + if err != nil { + return err + } if n.Children != nil { for _, root := range n.Children { for _, c := range root { @@ -23,7 +26,7 @@ func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { c = wrappedChild[0] // pull out wrapped element } - err := parse(reflect.ValueOf(v), c, "") + err = parse(reflect.ValueOf(v), c, "") if err != nil { if err == io.EOF { return nil diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go index 3112512a..3e970b62 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go @@ -40,11 +40,16 @@ func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) { out := &XMLNode{} for { tok, err := d.Token() - if tok == nil || err == io.EOF { - break - } if err != nil { - return out, err + if err == io.EOF { + break + } else { + return out, err + } + } + + if tok == nil { + break } switch typed := tok.(type) { diff --git a/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go b/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go deleted file mode 100644 index b51e9449..00000000 --- a/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go +++ /dev/null @@ -1,134 +0,0 @@ -package waiter - -import ( - "fmt" - "reflect" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/request" -) - -// A Config provides a collection of configuration values to setup a generated -// waiter code with. -type Config struct { - Name string - Delay int - MaxAttempts int - Operation string - Acceptors []WaitAcceptor -} - -// A WaitAcceptor provides the information needed to wait for an API operation -// to complete. -type WaitAcceptor struct { - Expected interface{} - Matcher string - State string - Argument string -} - -// A Waiter provides waiting for an operation to complete. -type Waiter struct { - Config - Client interface{} - Input interface{} -} - -// Wait waits for an operation to complete, expire max attempts, or fail. Error -// is returned if the operation fails. 
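The private/waiter package deleted below had been superseded by the aws/request-based waiters that back the generated WaitUntil* client methods. A sketch of the replacement call path, assuming a hypothetical bucket name:

```
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// Generated waiters such as WaitUntilBucketExists replace the
	// reflection-based private/waiter implementation removed here.
	err := svc.WaitUntilBucketExists(&s3.HeadBucketInput{
		Bucket: aws.String("example-bucket"),
	})
	if err != nil {
		fmt.Println("wait failed:", err)
	}
}
```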
-func (w *Waiter) Wait() error { - client := reflect.ValueOf(w.Client) - in := reflect.ValueOf(w.Input) - method := client.MethodByName(w.Config.Operation + "Request") - - for i := 0; i < w.MaxAttempts; i++ { - res := method.Call([]reflect.Value{in}) - req := res[0].Interface().(*request.Request) - req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Waiter")) - - err := req.Send() - for _, a := range w.Acceptors { - result := false - var vals []interface{} - switch a.Matcher { - case "pathAll", "path": - // Require all matches to be equal for result to match - vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) - if len(vals) == 0 { - break - } - result = true - for _, val := range vals { - if !awsutil.DeepEqual(val, a.Expected) { - result = false - break - } - } - case "pathAny": - // Only a single match needs to equal for the result to match - vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) - for _, val := range vals { - if awsutil.DeepEqual(val, a.Expected) { - result = true - break - } - } - case "status": - s := a.Expected.(int) - result = s == req.HTTPResponse.StatusCode - case "error": - if aerr, ok := err.(awserr.Error); ok { - result = aerr.Code() == a.Expected.(string) - } - case "pathList": - // ignored matcher - default: - logf(client, "WARNING: Waiter for %s encountered unexpected matcher: %s", - w.Config.Operation, a.Matcher) - } - - if !result { - // If there was no matching result found there is nothing more to do - // for this response, retry the request. - continue - } - - switch a.State { - case "success": - // waiter completed - return nil - case "failure": - // Waiter failure state triggered - return awserr.New("ResourceNotReady", - fmt.Sprintf("failed waiting for successful resource state"), err) - case "retry": - // clear the error and retry the operation - err = nil - default: - logf(client, "WARNING: Waiter for %s encountered unexpected state: %s", - w.Config.Operation, a.State) - } - } - if err != nil { - return err - } - - time.Sleep(time.Second * time.Duration(w.Delay)) - } - - return awserr.New("ResourceNotReady", - fmt.Sprintf("exceeded %d wait attempts", w.MaxAttempts), nil) -} - -func logf(client reflect.Value, msg string, args ...interface{}) { - cfgVal := client.FieldByName("Config") - if !cfgVal.IsValid() { - return - } - if cfg, ok := cfgVal.Interface().(*aws.Config); ok && cfg.Logger != nil { - cfg.Logger.Log(fmt.Sprintf(msg, args...)) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_cookie.go b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_cookie.go index 7852fb6f..17750719 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_cookie.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_cookie.go @@ -81,7 +81,7 @@ func NewCookieSigner(keyID string, privKey *rsa.PrivateKey, opts ...func(*Cookie // server's response. // // Example: -// s := NewCookieSigner(keyID, privKey) +// s := sign.NewCookieSigner(keyID, privKey) // // // Get Signed cookies for a resource that will expire in 1 hour // cookies, err := s.Sign("*", time.Now().Add(1 * time.Hour)) @@ -150,14 +150,14 @@ func cookieURLScheme(u string) (string, error) { // server's response. // // Example: -// s := NewCookieSigner(keyID, privKey) +// s := sign.NewCookieSigner(keyID, privKey) // // policy := &sign.Policy{ // Statements: []sign.Statement{ // { // // Read the provided documentation on how to set this // // correctly, you'll probably want to use wildcards. 
-// Resource: RawCloudFrontURL, +// Resource: rawCloudFrontURL, // Condition: sign.Condition{ // // Optional IP source address range // IPAddress: &sign.IPAddress{SourceIP: "192.0.2.0/24"}, diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go index 5318731f..564a3965 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -1,6 +1,5 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. -// Package s3 provides a client for Amazon Simple Storage Service. package s3 import ( @@ -8,6 +7,7 @@ import ( "io" "time" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awsutil" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/private/protocol" @@ -18,19 +18,18 @@ const opAbortMultipartUpload = "AbortMultipartUpload" // AbortMultipartUploadRequest generates a "aws/request.Request" representing the // client's request for the AbortMultipartUpload operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See AbortMultipartUpload for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the AbortMultipartUpload method directly -// instead. +// See AbortMultipartUpload for more information on using the AbortMultipartUpload +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the AbortMultipartUploadRequest method. // req, resp := client.AbortMultipartUploadRequest(params) @@ -73,33 +72,47 @@ func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req // API operation AbortMultipartUpload for usage and error information. // // Returned Error Codes: -// * NoSuchUpload +// * ErrCodeNoSuchUpload "NoSuchUpload" // The specified multipart upload does not exist. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload func (c *S3) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) { req, out := c.AbortMultipartUploadRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// AbortMultipartUploadWithContext is the same as AbortMultipartUpload with the addition of +// the ability to pass a context and additional request options. +// +// See AbortMultipartUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) AbortMultipartUploadWithContext(ctx aws.Context, input *AbortMultipartUploadInput, opts ...request.Option) (*AbortMultipartUploadOutput, error) { + req, out := c.AbortMultipartUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCompleteMultipartUpload = "CompleteMultipartUpload" // CompleteMultipartUploadRequest generates a "aws/request.Request" representing the // client's request for the CompleteMultipartUpload operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See CompleteMultipartUpload for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CompleteMultipartUpload method directly -// instead. +// See CompleteMultipartUpload for more information on using the CompleteMultipartUpload +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CompleteMultipartUploadRequest method. // req, resp := client.CompleteMultipartUploadRequest(params) @@ -139,27 +152,41 @@ func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload func (c *S3) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) { req, out := c.CompleteMultipartUploadRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CompleteMultipartUploadWithContext is the same as CompleteMultipartUpload with the addition of +// the ability to pass a context and additional request options. +// +// See CompleteMultipartUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CompleteMultipartUploadWithContext(ctx aws.Context, input *CompleteMultipartUploadInput, opts ...request.Option) (*CompleteMultipartUploadOutput, error) { + req, out := c.CompleteMultipartUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCopyObject = "CopyObject" // CopyObjectRequest generates a "aws/request.Request" representing the // client's request for the CopyObject operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. 
+// value will be populated with the request's response once the request complets +// successfuly. // -// See CopyObject for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CopyObject method directly -// instead. +// See CopyObject for more information on using the CopyObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CopyObjectRequest method. // req, resp := client.CopyObjectRequest(params) @@ -198,34 +225,48 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // API operation CopyObject for usage and error information. // // Returned Error Codes: -// * ObjectNotInActiveTierError +// * ErrCodeObjectNotInActiveTierError "ObjectNotInActiveTierError" // The source object of the COPY operation is not in the active tier and is // only stored in Amazon Glacier. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) { req, out := c.CopyObjectRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CopyObjectWithContext is the same as CopyObject with the addition of +// the ability to pass a context and additional request options. +// +// See CopyObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CopyObjectWithContext(ctx aws.Context, input *CopyObjectInput, opts ...request.Option) (*CopyObjectOutput, error) { + req, out := c.CopyObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateBucket = "CreateBucket" // CreateBucketRequest generates a "aws/request.Request" representing the // client's request for the CreateBucket operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See CreateBucket for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateBucket method directly -// instead. 
+// See CreateBucket for more information on using the CreateBucket +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateBucketRequest method. // req, resp := client.CreateBucketRequest(params) @@ -264,37 +305,50 @@ func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request // API operation CreateBucket for usage and error information. // // Returned Error Codes: -// * BucketAlreadyExists +// * ErrCodeBucketAlreadyExists "BucketAlreadyExists" // The requested bucket name is not available. The bucket namespace is shared // by all users of the system. Please select a different name and try again. // -// * BucketAlreadyOwnedByYou - +// * ErrCodeBucketAlreadyOwnedByYou "BucketAlreadyOwnedByYou" // // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) { req, out := c.CreateBucketRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateBucketWithContext is the same as CreateBucket with the addition of +// the ability to pass a context and additional request options. +// +// See CreateBucket for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CreateBucketWithContext(ctx aws.Context, input *CreateBucketInput, opts ...request.Option) (*CreateBucketOutput, error) { + req, out := c.CreateBucketRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateMultipartUpload = "CreateMultipartUpload" // CreateMultipartUploadRequest generates a "aws/request.Request" representing the // client's request for the CreateMultipartUpload operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See CreateMultipartUpload for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateMultipartUpload method directly -// instead. +// See CreateMultipartUpload for more information on using the CreateMultipartUpload +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateMultipartUploadRequest method. 
// req, resp := client.CreateMultipartUploadRequest(params) @@ -340,27 +394,41 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload func (c *S3) CreateMultipartUpload(input *CreateMultipartUploadInput) (*CreateMultipartUploadOutput, error) { req, out := c.CreateMultipartUploadRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateMultipartUploadWithContext is the same as CreateMultipartUpload with the addition of +// the ability to pass a context and additional request options. +// +// See CreateMultipartUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CreateMultipartUploadWithContext(ctx aws.Context, input *CreateMultipartUploadInput, opts ...request.Option) (*CreateMultipartUploadOutput, error) { + req, out := c.CreateMultipartUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteBucket = "DeleteBucket" // DeleteBucketRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucket operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DeleteBucket for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucket method directly -// instead. +// See DeleteBucket for more information on using the DeleteBucket +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteBucketRequest method. // req, resp := client.DeleteBucketRequest(params) @@ -403,27 +471,41 @@ func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket func (c *S3) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) { req, out := c.DeleteBucketRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteBucketWithContext is the same as DeleteBucket with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucket for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketWithContext(ctx aws.Context, input *DeleteBucketInput, opts ...request.Option) (*DeleteBucketOutput, error) { + req, out := c.DeleteBucketRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteBucketAnalyticsConfiguration = "DeleteBucketAnalyticsConfiguration" // DeleteBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucketAnalyticsConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DeleteBucketAnalyticsConfiguration for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucketAnalyticsConfiguration method directly -// instead. +// See DeleteBucketAnalyticsConfiguration for more information on using the DeleteBucketAnalyticsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteBucketAnalyticsConfigurationRequest method. // req, resp := client.DeleteBucketAnalyticsConfigurationRequest(params) @@ -466,27 +548,41 @@ func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyt // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration func (c *S3) DeleteBucketAnalyticsConfiguration(input *DeleteBucketAnalyticsConfigurationInput) (*DeleteBucketAnalyticsConfigurationOutput, error) { req, out := c.DeleteBucketAnalyticsConfigurationRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteBucketAnalyticsConfigurationWithContext is the same as DeleteBucketAnalyticsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketAnalyticsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *DeleteBucketAnalyticsConfigurationInput, opts ...request.Option) (*DeleteBucketAnalyticsConfigurationOutput, error) { + req, out := c.DeleteBucketAnalyticsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() } const opDeleteBucketCors = "DeleteBucketCors" // DeleteBucketCorsRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucketCors operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DeleteBucketCors for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucketCors method directly -// instead. +// See DeleteBucketCors for more information on using the DeleteBucketCors +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteBucketCorsRequest method. // req, resp := client.DeleteBucketCorsRequest(params) @@ -528,27 +624,117 @@ func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors func (c *S3) DeleteBucketCors(input *DeleteBucketCorsInput) (*DeleteBucketCorsOutput, error) { req, out := c.DeleteBucketCorsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteBucketCorsWithContext is the same as DeleteBucketCors with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketCors for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketCorsWithContext(ctx aws.Context, input *DeleteBucketCorsInput, opts ...request.Option) (*DeleteBucketCorsOutput, error) { + req, out := c.DeleteBucketCorsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketEncryption = "DeleteBucketEncryption" + +// DeleteBucketEncryptionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketEncryption operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketEncryption for more information on using the DeleteBucketEncryption +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the DeleteBucketEncryptionRequest method. +// req, resp := client.DeleteBucketEncryptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryption +func (c *S3) DeleteBucketEncryptionRequest(input *DeleteBucketEncryptionInput) (req *request.Request, output *DeleteBucketEncryptionOutput) { + op := &request.Operation{ + Name: opDeleteBucketEncryption, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?encryption", + } + + if input == nil { + input = &DeleteBucketEncryptionInput{} + } + + output = &DeleteBucketEncryptionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketEncryption API operation for Amazon Simple Storage Service. +// +// Deletes the server-side encryption configuration from the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketEncryption for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryption +func (c *S3) DeleteBucketEncryption(input *DeleteBucketEncryptionInput) (*DeleteBucketEncryptionOutput, error) { + req, out := c.DeleteBucketEncryptionRequest(input) + return out, req.Send() +} + +// DeleteBucketEncryptionWithContext is the same as DeleteBucketEncryption with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketEncryption for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketEncryptionWithContext(ctx aws.Context, input *DeleteBucketEncryptionInput, opts ...request.Option) (*DeleteBucketEncryptionOutput, error) { + req, out := c.DeleteBucketEncryptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteBucketInventoryConfiguration = "DeleteBucketInventoryConfiguration" // DeleteBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucketInventoryConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DeleteBucketInventoryConfiguration for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. 
If -// you just want the service response, call the DeleteBucketInventoryConfiguration method directly -// instead. +// See DeleteBucketInventoryConfiguration for more information on using the DeleteBucketInventoryConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteBucketInventoryConfigurationRequest method. // req, resp := client.DeleteBucketInventoryConfigurationRequest(params) @@ -591,27 +777,41 @@ func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInvent // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration func (c *S3) DeleteBucketInventoryConfiguration(input *DeleteBucketInventoryConfigurationInput) (*DeleteBucketInventoryConfigurationOutput, error) { req, out := c.DeleteBucketInventoryConfigurationRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteBucketInventoryConfigurationWithContext is the same as DeleteBucketInventoryConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketInventoryConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketInventoryConfigurationWithContext(ctx aws.Context, input *DeleteBucketInventoryConfigurationInput, opts ...request.Option) (*DeleteBucketInventoryConfigurationOutput, error) { + req, out := c.DeleteBucketInventoryConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteBucketLifecycle = "DeleteBucketLifecycle" // DeleteBucketLifecycleRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucketLifecycle operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DeleteBucketLifecycle for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucketLifecycle method directly -// instead. +// See DeleteBucketLifecycle for more information on using the DeleteBucketLifecycle +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. 
 //
 //    // Example sending a request using the DeleteBucketLifecycleRequest method.
 //    req, resp := client.DeleteBucketLifecycleRequest(params)
@@ -653,27 +853,41 @@ func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (re
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle
 func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBucketLifecycleOutput, error) {
 	req, out := c.DeleteBucketLifecycleRequest(input)
-	err := req.Send()
-	return out, err
+	return out, req.Send()
+}
+
+// DeleteBucketLifecycleWithContext is the same as DeleteBucketLifecycle with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketLifecycle for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketLifecycleWithContext(ctx aws.Context, input *DeleteBucketLifecycleInput, opts ...request.Option) (*DeleteBucketLifecycleOutput, error) {
+	req, out := c.DeleteBucketLifecycleRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
 }

 const opDeleteBucketMetricsConfiguration = "DeleteBucketMetricsConfiguration"

 // DeleteBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the
 // client's request for the DeleteBucketMetricsConfiguration operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See DeleteBucketMetricsConfiguration for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the DeleteBucketMetricsConfiguration method directly
-// instead.
+// See DeleteBucketMetricsConfiguration for more information on using the DeleteBucketMetricsConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the DeleteBucketMetricsConfigurationRequest method.
 //    req, resp := client.DeleteBucketMetricsConfigurationRequest(params)
@@ -716,27 +930,41 @@ func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsC
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration
 func (c *S3) DeleteBucketMetricsConfiguration(input *DeleteBucketMetricsConfigurationInput) (*DeleteBucketMetricsConfigurationOutput, error) {
 	req, out := c.DeleteBucketMetricsConfigurationRequest(input)
-	err := req.Send()
-	return out, err
+	return out, req.Send()
+}
+
+// DeleteBucketMetricsConfigurationWithContext is the same as DeleteBucketMetricsConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketMetricsConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketMetricsConfigurationWithContext(ctx aws.Context, input *DeleteBucketMetricsConfigurationInput, opts ...request.Option) (*DeleteBucketMetricsConfigurationOutput, error) {
+	req, out := c.DeleteBucketMetricsConfigurationRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
 }

 const opDeleteBucketPolicy = "DeleteBucketPolicy"

 // DeleteBucketPolicyRequest generates a "aws/request.Request" representing the
 // client's request for the DeleteBucketPolicy operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See DeleteBucketPolicy for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the DeleteBucketPolicy method directly
-// instead.
+// See DeleteBucketPolicy for more information on using the DeleteBucketPolicy
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the DeleteBucketPolicyRequest method.
 //    req, resp := client.DeleteBucketPolicyRequest(params)
@@ -778,27 +1006,41 @@ func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *req
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy
 func (c *S3) DeleteBucketPolicy(input *DeleteBucketPolicyInput) (*DeleteBucketPolicyOutput, error) {
 	req, out := c.DeleteBucketPolicyRequest(input)
-	err := req.Send()
-	return out, err
+	return out, req.Send()
+}
+
+// DeleteBucketPolicyWithContext is the same as DeleteBucketPolicy with the addition of
+// the ability to pass a context and additional request options.
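The new boilerplate repeated through these hunks says the Request-returning form exists so callers can inject custom logic, such as custom headers, before sending. A minimal sketch of that lifecycle-injection pattern against DeleteBucketPolicyRequest (the bucket and header are hypothetical):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
	client := s3.New(sess)

	// Build the request without sending it, as the generated docs describe.
	req, resp := client.DeleteBucketPolicyRequest(&s3.DeleteBucketPolicyInput{
		Bucket: aws.String("my-example-bucket"), // hypothetical bucket
	})

	// Inject a custom header before the request is signed and sent.
	req.HTTPRequest.Header.Set("X-Example-Trace-Id", "demo-123") // hypothetical header

	if err := req.Send(); err != nil {
		fmt.Println("delete policy failed:", err)
		return
	}
	fmt.Println(resp) // resp is only valid once Send returns without error
}
```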
+//
+// See DeleteBucketPolicy for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketPolicyWithContext(ctx aws.Context, input *DeleteBucketPolicyInput, opts ...request.Option) (*DeleteBucketPolicyOutput, error) {
+	req, out := c.DeleteBucketPolicyRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
 }

 const opDeleteBucketReplication = "DeleteBucketReplication"

 // DeleteBucketReplicationRequest generates a "aws/request.Request" representing the
 // client's request for the DeleteBucketReplication operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See DeleteBucketReplication for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the DeleteBucketReplication method directly
-// instead.
+// See DeleteBucketReplication for more information on using the DeleteBucketReplication
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the DeleteBucketReplicationRequest method.
 //    req, resp := client.DeleteBucketReplicationRequest(params)
@@ -840,27 +1082,41 @@ func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput)
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication
 func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) {
 	req, out := c.DeleteBucketReplicationRequest(input)
-	err := req.Send()
-	return out, err
+	return out, req.Send()
+}
+
+// DeleteBucketReplicationWithContext is the same as DeleteBucketReplication with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketReplication for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketReplicationWithContext(ctx aws.Context, input *DeleteBucketReplicationInput, opts ...request.Option) (*DeleteBucketReplicationOutput, error) {
+	req, out := c.DeleteBucketReplicationRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
 }

 const opDeleteBucketTagging = "DeleteBucketTagging"

 // DeleteBucketTaggingRequest generates a "aws/request.Request" representing the
 // client's request for the DeleteBucketTagging operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See DeleteBucketTagging for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the DeleteBucketTagging method directly
-// instead.
+// See DeleteBucketTagging for more information on using the DeleteBucketTagging
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the DeleteBucketTaggingRequest method.
 //    req, resp := client.DeleteBucketTaggingRequest(params)
@@ -902,27 +1158,41 @@ func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *r
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging
 func (c *S3) DeleteBucketTagging(input *DeleteBucketTaggingInput) (*DeleteBucketTaggingOutput, error) {
 	req, out := c.DeleteBucketTaggingRequest(input)
-	err := req.Send()
-	return out, err
+	return out, req.Send()
+}
+
+// DeleteBucketTaggingWithContext is the same as DeleteBucketTagging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketTagging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketTaggingWithContext(ctx aws.Context, input *DeleteBucketTaggingInput, opts ...request.Option) (*DeleteBucketTaggingOutput, error) {
+	req, out := c.DeleteBucketTaggingRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
 }

 const opDeleteBucketWebsite = "DeleteBucketWebsite"

 // DeleteBucketWebsiteRequest generates a "aws/request.Request" representing the
 // client's request for the DeleteBucketWebsite operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See DeleteBucketWebsite for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
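Each WithContext variant added in these hunks takes an aws.Context for cancellation, and the doc comments warn that a nil context panics. A minimal sketch of cancelling DeleteBucketTagging with a deadline, relying on the fact that the standard library's context.Context satisfies aws.Context (region, bucket, and timeout are hypothetical):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
	client := s3.New(sess)

	// Always pass a non-nil context; a nil context panics per the doc comment.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	_, err := client.DeleteBucketTaggingWithContext(ctx, &s3.DeleteBucketTaggingInput{
		Bucket: aws.String("my-example-bucket"), // hypothetical bucket
	})
	if err != nil {
		fmt.Println("request failed or timed out:", err)
		return
	}
	fmt.Println("bucket tagging deleted")
}
```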
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the DeleteBucketWebsite method directly
-// instead.
+// See DeleteBucketWebsite for more information on using the DeleteBucketWebsite
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the DeleteBucketWebsiteRequest method.
 //    req, resp := client.DeleteBucketWebsiteRequest(params)
@@ -964,27 +1234,41 @@ func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *r
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite
 func (c *S3) DeleteBucketWebsite(input *DeleteBucketWebsiteInput) (*DeleteBucketWebsiteOutput, error) {
 	req, out := c.DeleteBucketWebsiteRequest(input)
-	err := req.Send()
-	return out, err
+	return out, req.Send()
+}
+
+// DeleteBucketWebsiteWithContext is the same as DeleteBucketWebsite with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketWebsite for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketWebsiteWithContext(ctx aws.Context, input *DeleteBucketWebsiteInput, opts ...request.Option) (*DeleteBucketWebsiteOutput, error) {
+	req, out := c.DeleteBucketWebsiteRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
 }

 const opDeleteObject = "DeleteObject"

 // DeleteObjectRequest generates a "aws/request.Request" representing the
 // client's request for the DeleteObject operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See DeleteObject for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the DeleteObject method directly
-// instead.
+// See DeleteObject for more information on using the DeleteObject
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the DeleteObjectRequest method.
 //    req, resp := client.DeleteObjectRequest(params)
@@ -1026,27 +1310,41 @@ func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject
 func (c *S3) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) {
 	req, out := c.DeleteObjectRequest(input)
-	err := req.Send()
-	return out, err
+	return out, req.Send()
+}
+
+// DeleteObjectWithContext is the same as DeleteObject with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteObject for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteObjectWithContext(ctx aws.Context, input *DeleteObjectInput, opts ...request.Option) (*DeleteObjectOutput, error) {
+	req, out := c.DeleteObjectRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
 }

 const opDeleteObjectTagging = "DeleteObjectTagging"

 // DeleteObjectTaggingRequest generates a "aws/request.Request" representing the
 // client's request for the DeleteObjectTagging operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See DeleteObjectTagging for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the DeleteObjectTagging method directly
-// instead.
+// See DeleteObjectTagging for more information on using the DeleteObjectTagging
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the DeleteObjectTaggingRequest method.
 //    req, resp := client.DeleteObjectTaggingRequest(params)
@@ -1086,27 +1384,41 @@ func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *r
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging
 func (c *S3) DeleteObjectTagging(input *DeleteObjectTaggingInput) (*DeleteObjectTaggingOutput, error) {
 	req, out := c.DeleteObjectTaggingRequest(input)
-	err := req.Send()
-	return out, err
+	return out, req.Send()
+}
+
+// DeleteObjectTaggingWithContext is the same as DeleteObjectTagging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteObjectTagging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteObjectTaggingWithContext(ctx aws.Context, input *DeleteObjectTaggingInput, opts ...request.Option) (*DeleteObjectTaggingOutput, error) {
	req, out := c.DeleteObjectTaggingRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
 }

 const opDeleteObjects = "DeleteObjects"

 // DeleteObjectsRequest generates a "aws/request.Request" representing the
 // client's request for the DeleteObjects operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See DeleteObjects for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the DeleteObjects method directly
-// instead.
+// See DeleteObjects for more information on using the DeleteObjects
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the DeleteObjectsRequest method.
 //    req, resp := client.DeleteObjectsRequest(params)
@@ -1147,27 +1459,41 @@ func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Reque
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects
 func (c *S3) DeleteObjects(input *DeleteObjectsInput) (*DeleteObjectsOutput, error) {
 	req, out := c.DeleteObjectsRequest(input)
-	err := req.Send()
-	return out, err
+	return out, req.Send()
+}
+
+// DeleteObjectsWithContext is the same as DeleteObjects with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteObjects for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteObjectsWithContext(ctx aws.Context, input *DeleteObjectsInput, opts ...request.Option) (*DeleteObjectsOutput, error) {
+	req, out := c.DeleteObjectsRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
 }

 const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration"

 // GetBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the
 // client's request for the GetBucketAccelerateConfiguration operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See GetBucketAccelerateConfiguration for usage and error information.
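DeleteObjects, unlike the single-key DeleteObject above it, removes a batch of keys in one round trip and reports per-key failures in the response body rather than in the returned error. A minimal sketch, with hypothetical bucket and key names:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
	client := s3.New(sess)

	// Batch-delete two keys in a single request.
	out, err := client.DeleteObjects(&s3.DeleteObjectsInput{
		Bucket: aws.String("my-example-bucket"), // hypothetical bucket
		Delete: &s3.Delete{
			Objects: []*s3.ObjectIdentifier{
				{Key: aws.String("logs/2017-01-01.log")}, // hypothetical keys
				{Key: aws.String("logs/2017-01-02.log")},
			},
			Quiet: aws.Bool(true), // only report failures in the response
		},
	})
	if err != nil {
		fmt.Println("batch delete failed:", err)
		return
	}
	// Individual failures come back in out.Errors rather than err.
	for _, e := range out.Errors {
		fmt.Printf("could not delete %s: %s\n", aws.StringValue(e.Key), aws.StringValue(e.Message))
	}
}
```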
+// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketAccelerateConfiguration method directly -// instead. +// See GetBucketAccelerateConfiguration for more information on using the GetBucketAccelerateConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketAccelerateConfigurationRequest method. // req, resp := client.GetBucketAccelerateConfigurationRequest(params) @@ -1207,27 +1533,41 @@ func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateC // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration func (c *S3) GetBucketAccelerateConfiguration(input *GetBucketAccelerateConfigurationInput) (*GetBucketAccelerateConfigurationOutput, error) { req, out := c.GetBucketAccelerateConfigurationRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetBucketAccelerateConfigurationWithContext is the same as GetBucketAccelerateConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketAccelerateConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketAccelerateConfigurationWithContext(ctx aws.Context, input *GetBucketAccelerateConfigurationInput, opts ...request.Option) (*GetBucketAccelerateConfigurationOutput, error) { + req, out := c.GetBucketAccelerateConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetBucketAcl = "GetBucketAcl" // GetBucketAclRequest generates a "aws/request.Request" representing the // client's request for the GetBucketAcl operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See GetBucketAcl for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketAcl method directly -// instead. +// See GetBucketAcl for more information on using the GetBucketAcl +// API call, and error handling. 
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the GetBucketAclRequest method.
 //    req, resp := client.GetBucketAclRequest(params)
@@ -1267,27 +1607,41 @@ func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl
 func (c *S3) GetBucketAcl(input *GetBucketAclInput) (*GetBucketAclOutput, error) {
 	req, out := c.GetBucketAclRequest(input)
-	err := req.Send()
-	return out, err
+	return out, req.Send()
+}
+
+// GetBucketAclWithContext is the same as GetBucketAcl with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketAcl for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketAclWithContext(ctx aws.Context, input *GetBucketAclInput, opts ...request.Option) (*GetBucketAclOutput, error) {
+	req, out := c.GetBucketAclRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
 }

 const opGetBucketAnalyticsConfiguration = "GetBucketAnalyticsConfiguration"

 // GetBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the
 // client's request for the GetBucketAnalyticsConfiguration operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See GetBucketAnalyticsConfiguration for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the GetBucketAnalyticsConfiguration method directly
-// instead.
+// See GetBucketAnalyticsConfiguration for more information on using the GetBucketAnalyticsConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the GetBucketAnalyticsConfigurationRequest method.
 //    req, resp := client.GetBucketAnalyticsConfigurationRequest(params)
@@ -1328,27 +1682,41 @@ func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsCon
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration
 func (c *S3) GetBucketAnalyticsConfiguration(input *GetBucketAnalyticsConfigurationInput) (*GetBucketAnalyticsConfigurationOutput, error) {
 	req, out := c.GetBucketAnalyticsConfigurationRequest(input)
-	err := req.Send()
-	return out, err
+	return out, req.Send()
+}
+
+// GetBucketAnalyticsConfigurationWithContext is the same as GetBucketAnalyticsConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketAnalyticsConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *GetBucketAnalyticsConfigurationInput, opts ...request.Option) (*GetBucketAnalyticsConfigurationOutput, error) {
+	req, out := c.GetBucketAnalyticsConfigurationRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
 }

 const opGetBucketCors = "GetBucketCors"

 // GetBucketCorsRequest generates a "aws/request.Request" representing the
 // client's request for the GetBucketCors operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See GetBucketCors for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the GetBucketCors method directly
-// instead.
+// See GetBucketCors for more information on using the GetBucketCors
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the GetBucketCorsRequest method.
 //    req, resp := client.GetBucketCorsRequest(params)
@@ -1388,27 +1756,115 @@ func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Reque
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors
 func (c *S3) GetBucketCors(input *GetBucketCorsInput) (*GetBucketCorsOutput, error) {
 	req, out := c.GetBucketCorsRequest(input)
-	err := req.Send()
-	return out, err
+	return out, req.Send()
+}
+
+// GetBucketCorsWithContext is the same as GetBucketCors with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketCors for details on how to use this API operation.
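GetBucketCors returns a list of rule structs rather than a single value. A minimal sketch iterating the returned rules (bucket name hypothetical; note that a bucket with no CORS configuration returns an error rather than an empty list):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
	client := s3.New(sess)

	out, err := client.GetBucketCors(&s3.GetBucketCorsInput{
		Bucket: aws.String("my-example-bucket"), // hypothetical bucket
	})
	if err != nil {
		fmt.Println("no CORS configuration:", err)
		return
	}
	for i, rule := range out.CORSRules {
		fmt.Printf("rule %d: methods=%s origins=%s\n", i,
			strings.Join(aws.StringValueSlice(rule.AllowedMethods), ","),
			strings.Join(aws.StringValueSlice(rule.AllowedOrigins), ","))
	}
}
```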
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketCorsWithContext(ctx aws.Context, input *GetBucketCorsInput, opts ...request.Option) (*GetBucketCorsOutput, error) {
+	req, out := c.GetBucketCorsRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opGetBucketEncryption = "GetBucketEncryption"
+
+// GetBucketEncryptionRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketEncryption operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See GetBucketEncryption for more information on using the GetBucketEncryption
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the GetBucketEncryptionRequest method.
+//    req, resp := client.GetBucketEncryptionRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryption
+func (c *S3) GetBucketEncryptionRequest(input *GetBucketEncryptionInput) (req *request.Request, output *GetBucketEncryptionOutput) {
+	op := &request.Operation{
+		Name:       opGetBucketEncryption,
+		HTTPMethod: "GET",
+		HTTPPath:   "/{Bucket}?encryption",
+	}
+
+	if input == nil {
+		input = &GetBucketEncryptionInput{}
+	}
+
+	output = &GetBucketEncryptionOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetBucketEncryption API operation for Amazon Simple Storage Service.
+//
+// Returns the server-side encryption configuration of a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketEncryption for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryption
+func (c *S3) GetBucketEncryption(input *GetBucketEncryptionInput) (*GetBucketEncryptionOutput, error) {
+	req, out := c.GetBucketEncryptionRequest(input)
+	return out, req.Send()
+}
+
+// GetBucketEncryptionWithContext is the same as GetBucketEncryption with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketEncryption for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
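GetBucketEncryption, the read-side counterpart of the DeleteBucketEncryption operation added earlier in this patch, returns the bucket's default server-side encryption rules. A minimal sketch of reading the configured algorithm, assuming the output field names shown for this SDK revision (ServerSideEncryptionConfiguration, Rules, ApplyServerSideEncryptionByDefault); bucket name hypothetical:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
	client := s3.New(sess)

	out, err := client.GetBucketEncryption(&s3.GetBucketEncryptionInput{
		Bucket: aws.String("my-example-bucket"), // hypothetical bucket
	})
	if err != nil {
		// Buckets with no default encryption configured surface an error here.
		fmt.Println("no encryption configuration:", err)
		return
	}
	for _, rule := range out.ServerSideEncryptionConfiguration.Rules {
		if def := rule.ApplyServerSideEncryptionByDefault; def != nil {
			fmt.Println("default algorithm:", aws.StringValue(def.SSEAlgorithm))
		}
	}
}
```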
+func (c *S3) GetBucketEncryptionWithContext(ctx aws.Context, input *GetBucketEncryptionInput, opts ...request.Option) (*GetBucketEncryptionOutput, error) { + req, out := c.GetBucketEncryptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetBucketInventoryConfiguration = "GetBucketInventoryConfiguration" // GetBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketInventoryConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See GetBucketInventoryConfiguration for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketInventoryConfiguration method directly -// instead. +// See GetBucketInventoryConfiguration for more information on using the GetBucketInventoryConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketInventoryConfigurationRequest method. // req, resp := client.GetBucketInventoryConfigurationRequest(params) @@ -1449,27 +1905,41 @@ func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryCon // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration func (c *S3) GetBucketInventoryConfiguration(input *GetBucketInventoryConfigurationInput) (*GetBucketInventoryConfigurationOutput, error) { req, out := c.GetBucketInventoryConfigurationRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetBucketInventoryConfigurationWithContext is the same as GetBucketInventoryConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketInventoryConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketInventoryConfigurationWithContext(ctx aws.Context, input *GetBucketInventoryConfigurationInput, opts ...request.Option) (*GetBucketInventoryConfigurationOutput, error) { + req, out := c.GetBucketInventoryConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetBucketLifecycle = "GetBucketLifecycle" // GetBucketLifecycleRequest generates a "aws/request.Request" representing the // client's request for the GetBucketLifecycle operation. 
The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See GetBucketLifecycle for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketLifecycle method directly -// instead. +// See GetBucketLifecycle for more information on using the GetBucketLifecycle +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketLifecycleRequest method. // req, resp := client.GetBucketLifecycleRequest(params) @@ -1512,27 +1982,41 @@ func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *req // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifecycleOutput, error) { req, out := c.GetBucketLifecycleRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetBucketLifecycleWithContext is the same as GetBucketLifecycle with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLifecycle for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketLifecycleWithContext(ctx aws.Context, input *GetBucketLifecycleInput, opts ...request.Option) (*GetBucketLifecycleOutput, error) { + req, out := c.GetBucketLifecycleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration" // GetBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketLifecycleConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See GetBucketLifecycleConfiguration for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. 
If -// you just want the service response, call the GetBucketLifecycleConfiguration method directly -// instead. +// See GetBucketLifecycleConfiguration for more information on using the GetBucketLifecycleConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketLifecycleConfigurationRequest method. // req, resp := client.GetBucketLifecycleConfigurationRequest(params) @@ -1572,27 +2056,41 @@ func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleCon // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration func (c *S3) GetBucketLifecycleConfiguration(input *GetBucketLifecycleConfigurationInput) (*GetBucketLifecycleConfigurationOutput, error) { req, out := c.GetBucketLifecycleConfigurationRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetBucketLifecycleConfigurationWithContext is the same as GetBucketLifecycleConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLifecycleConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketLifecycleConfigurationWithContext(ctx aws.Context, input *GetBucketLifecycleConfigurationInput, opts ...request.Option) (*GetBucketLifecycleConfigurationOutput, error) { + req, out := c.GetBucketLifecycleConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetBucketLocation = "GetBucketLocation" // GetBucketLocationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketLocation operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See GetBucketLocation for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketLocation method directly -// instead. +// See GetBucketLocation for more information on using the GetBucketLocation +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketLocationRequest method. 
// req, resp := client.GetBucketLocationRequest(params) @@ -1632,27 +2130,41 @@ func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *reque // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocationOutput, error) { req, out := c.GetBucketLocationRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetBucketLocationWithContext is the same as GetBucketLocation with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLocation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketLocationWithContext(ctx aws.Context, input *GetBucketLocationInput, opts ...request.Option) (*GetBucketLocationOutput, error) { + req, out := c.GetBucketLocationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetBucketLogging = "GetBucketLogging" // GetBucketLoggingRequest generates a "aws/request.Request" representing the // client's request for the GetBucketLogging operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See GetBucketLogging for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketLogging method directly -// instead. +// See GetBucketLogging for more information on using the GetBucketLogging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketLoggingRequest method. // req, resp := client.GetBucketLoggingRequest(params) @@ -1693,27 +2205,41 @@ func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging func (c *S3) GetBucketLogging(input *GetBucketLoggingInput) (*GetBucketLoggingOutput, error) { req, out := c.GetBucketLoggingRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetBucketLoggingWithContext is the same as GetBucketLogging with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLogging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketLoggingWithContext(ctx aws.Context, input *GetBucketLoggingInput, opts ...request.Option) (*GetBucketLoggingOutput, error) { + req, out := c.GetBucketLoggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetBucketMetricsConfiguration = "GetBucketMetricsConfiguration" // GetBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketMetricsConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See GetBucketMetricsConfiguration for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketMetricsConfiguration method directly -// instead. +// See GetBucketMetricsConfiguration for more information on using the GetBucketMetricsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketMetricsConfigurationRequest method. // req, resp := client.GetBucketMetricsConfigurationRequest(params) @@ -1754,27 +2280,41 @@ func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigu // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration func (c *S3) GetBucketMetricsConfiguration(input *GetBucketMetricsConfigurationInput) (*GetBucketMetricsConfigurationOutput, error) { req, out := c.GetBucketMetricsConfigurationRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetBucketMetricsConfigurationWithContext is the same as GetBucketMetricsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketMetricsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketMetricsConfigurationWithContext(ctx aws.Context, input *GetBucketMetricsConfigurationInput, opts ...request.Option) (*GetBucketMetricsConfigurationOutput, error) { + req, out := c.GetBucketMetricsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() } const opGetBucketNotification = "GetBucketNotification" // GetBucketNotificationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketNotification operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See GetBucketNotification for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketNotification method directly -// instead. +// See GetBucketNotification for more information on using the GetBucketNotification +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketNotificationRequest method. // req, resp := client.GetBucketNotificationRequest(params) @@ -1817,27 +2357,41 @@ func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurat // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequest) (*NotificationConfigurationDeprecated, error) { req, out := c.GetBucketNotificationRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetBucketNotificationWithContext is the same as GetBucketNotification with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketNotification for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketNotificationWithContext(ctx aws.Context, input *GetBucketNotificationConfigurationRequest, opts ...request.Option) (*NotificationConfigurationDeprecated, error) { + req, out := c.GetBucketNotificationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration" // GetBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketNotificationConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See GetBucketNotificationConfiguration for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketNotificationConfiguration method directly -// instead. +// See GetBucketNotificationConfiguration for more information on using the GetBucketNotificationConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketNotificationConfigurationRequest method. // req, resp := client.GetBucketNotificationConfigurationRequest(params) @@ -1877,27 +2431,41 @@ func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificat // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration func (c *S3) GetBucketNotificationConfiguration(input *GetBucketNotificationConfigurationRequest) (*NotificationConfiguration, error) { req, out := c.GetBucketNotificationConfigurationRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetBucketNotificationConfigurationWithContext is the same as GetBucketNotificationConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketNotificationConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketNotificationConfigurationWithContext(ctx aws.Context, input *GetBucketNotificationConfigurationRequest, opts ...request.Option) (*NotificationConfiguration, error) { + req, out := c.GetBucketNotificationConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetBucketPolicy = "GetBucketPolicy" // GetBucketPolicyRequest generates a "aws/request.Request" representing the // client's request for the GetBucketPolicy operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See GetBucketPolicy for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketPolicy method directly -// instead. +// See GetBucketPolicy for more information on using the GetBucketPolicy +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketPolicyRequest method. // req, resp := client.GetBucketPolicyRequest(params) @@ -1937,27 +2505,41 @@ func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.R // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy func (c *S3) GetBucketPolicy(input *GetBucketPolicyInput) (*GetBucketPolicyOutput, error) { req, out := c.GetBucketPolicyRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetBucketPolicyWithContext is the same as GetBucketPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil, a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketPolicyWithContext(ctx aws.Context, input *GetBucketPolicyInput, opts ...request.Option) (*GetBucketPolicyOutput, error) { + req, out := c.GetBucketPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetBucketReplication = "GetBucketReplication" // GetBucketReplicationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketReplication operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See GetBucketReplication for usage and error information. +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketReplication method directly -// instead. +// See GetBucketReplication for more information on using the GetBucketReplication +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketReplicationRequest method.
// req, resp := client.GetBucketReplicationRequest(params) @@ -1997,27 +2579,41 @@ func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) { req, out := c.GetBucketReplicationRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetBucketReplicationWithContext is the same as GetBucketReplication with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketReplication for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil, a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketReplicationWithContext(ctx aws.Context, input *GetBucketReplicationInput, opts ...request.Option) (*GetBucketReplicationOutput, error) { + req, out := c.GetBucketReplicationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetBucketRequestPayment = "GetBucketRequestPayment" // GetBucketRequestPaymentRequest generates a "aws/request.Request" representing the // client's request for the GetBucketRequestPayment operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See GetBucketRequestPayment for usage and error information. +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketRequestPayment method directly -// instead. +// See GetBucketRequestPayment for more information on using the GetBucketRequestPayment +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketRequestPaymentRequest method. // req, resp := client.GetBucketRequestPaymentRequest(params) @@ -2057,27 +2653,41 @@ func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment func (c *S3) GetBucketRequestPayment(input *GetBucketRequestPaymentInput) (*GetBucketRequestPaymentOutput, error) { req, out := c.GetBucketRequestPaymentRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetBucketRequestPaymentWithContext is the same as GetBucketRequestPayment with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketRequestPayment for details on how to use this API operation.
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil, a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketRequestPaymentWithContext(ctx aws.Context, input *GetBucketRequestPaymentInput, opts ...request.Option) (*GetBucketRequestPaymentOutput, error) { + req, out := c.GetBucketRequestPaymentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetBucketTagging = "GetBucketTagging" // GetBucketTaggingRequest generates a "aws/request.Request" representing the // client's request for the GetBucketTagging operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See GetBucketTagging for usage and error information. +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketTagging method directly -// instead. +// See GetBucketTagging for more information on using the GetBucketTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketTaggingRequest method. // req, resp := client.GetBucketTaggingRequest(params) @@ -2117,27 +2727,41 @@ func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging func (c *S3) GetBucketTagging(input *GetBucketTaggingInput) (*GetBucketTaggingOutput, error) { req, out := c.GetBucketTaggingRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetBucketTaggingWithContext is the same as GetBucketTagging with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil, a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketTaggingWithContext(ctx aws.Context, input *GetBucketTaggingInput, opts ...request.Option) (*GetBucketTaggingOutput, error) { + req, out := c.GetBucketTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetBucketVersioning = "GetBucketVersioning" // GetBucketVersioningRequest generates a "aws/request.Request" representing the // client's request for the GetBucketVersioning operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called.
+// value will be populated with the request's response once the request completes +// successfully. // -// See GetBucketVersioning for usage and error information. +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketVersioning method directly -// instead. +// See GetBucketVersioning for more information on using the GetBucketVersioning +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketVersioningRequest method. // req, resp := client.GetBucketVersioningRequest(params) @@ -2177,27 +2801,41 @@ func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *r // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning func (c *S3) GetBucketVersioning(input *GetBucketVersioningInput) (*GetBucketVersioningOutput, error) { req, out := c.GetBucketVersioningRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetBucketVersioningWithContext is the same as GetBucketVersioning with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketVersioning for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil, a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketVersioningWithContext(ctx aws.Context, input *GetBucketVersioningInput, opts ...request.Option) (*GetBucketVersioningOutput, error) { + req, out := c.GetBucketVersioningRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetBucketWebsite = "GetBucketWebsite" // GetBucketWebsiteRequest generates a "aws/request.Request" representing the // client's request for the GetBucketWebsite operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See GetBucketWebsite for usage and error information. +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketWebsite method directly -// instead. +// See GetBucketWebsite for more information on using the GetBucketWebsite +// API call, and error handling.
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketWebsiteRequest method. // req, resp := client.GetBucketWebsiteRequest(params) @@ -2237,27 +2875,41 @@ func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite func (c *S3) GetBucketWebsite(input *GetBucketWebsiteInput) (*GetBucketWebsiteOutput, error) { req, out := c.GetBucketWebsiteRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetBucketWebsiteWithContext is the same as GetBucketWebsite with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketWebsite for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil, a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketWebsiteWithContext(ctx aws.Context, input *GetBucketWebsiteInput, opts ...request.Option) (*GetBucketWebsiteOutput, error) { + req, out := c.GetBucketWebsiteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetObject = "GetObject" // GetObjectRequest generates a "aws/request.Request" representing the // client's request for the GetObject operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See GetObject for usage and error information. +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetObject method directly -// instead. +// See GetObject for more information on using the GetObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetObjectRequest method. // req, resp := client.GetObjectRequest(params) @@ -2296,33 +2948,47 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp // API operation GetObject for usage and error information. // // Returned Error Codes: -// * NoSuchKey +// * ErrCodeNoSuchKey "NoSuchKey" // The specified key does not exist. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) { req, out := c.GetObjectRequest(input) - err := req.Send() - return out, err + return out, req.Send() +}
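With the error codes now exposed as generated constants, callers can match on ErrCodeNoSuchKey instead of a bare string literal. A minimal sketch, not part of the patch ("my-bucket" and "my-key" are hypothetical values):

```
import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/s3"
)

func keyExists(client *s3.S3) (bool, error) {
	out, err := client.GetObject(&s3.GetObjectInput{
		Bucket: aws.String("my-bucket"), // hypothetical
		Key:    aws.String("my-key"),    // hypothetical
	})
	if err != nil {
		// awserr.Error exposes the service error code and message.
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeNoSuchKey {
			return false, nil
		}
		return false, err
	}
	out.Body.Close() // close the object body when it is not needed
	return true, nil
}
```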
+ +// GetObjectWithContext is the same as GetObject with the addition of +// the ability to pass a context and additional request options. +// +// See GetObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil, a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectWithContext(ctx aws.Context, input *GetObjectInput, opts ...request.Option) (*GetObjectOutput, error) { + req, out := c.GetObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetObjectAcl = "GetObjectAcl" // GetObjectAclRequest generates a "aws/request.Request" representing the // client's request for the GetObjectAcl operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See GetObjectAcl for usage and error information. +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetObjectAcl method directly -// instead. +// See GetObjectAcl for more information on using the GetObjectAcl +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetObjectAclRequest method. // req, resp := client.GetObjectAclRequest(params) @@ -2361,33 +3027,47 @@ func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request // API operation GetObjectAcl for usage and error information. // // Returned Error Codes: -// * NoSuchKey +// * ErrCodeNoSuchKey "NoSuchKey" // The specified key does not exist. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl func (c *S3) GetObjectAcl(input *GetObjectAclInput) (*GetObjectAclOutput, error) { req, out := c.GetObjectAclRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetObjectAclWithContext is the same as GetObjectAcl with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil, a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts.
+func (c *S3) GetObjectAclWithContext(ctx aws.Context, input *GetObjectAclInput, opts ...request.Option) (*GetObjectAclOutput, error) { + req, out := c.GetObjectAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetObjectTagging = "GetObjectTagging" // GetObjectTaggingRequest generates a "aws/request.Request" representing the // client's request for the GetObjectTagging operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See GetObjectTagging for usage and error information. +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetObjectTagging method directly -// instead. +// See GetObjectTagging for more information on using the GetObjectTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetObjectTaggingRequest method. // req, resp := client.GetObjectTaggingRequest(params) @@ -2427,27 +3107,41 @@ func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging func (c *S3) GetObjectTagging(input *GetObjectTaggingInput) (*GetObjectTaggingOutput, error) { req, out := c.GetObjectTaggingRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetObjectTaggingWithContext is the same as GetObjectTagging with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil, a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectTaggingWithContext(ctx aws.Context, input *GetObjectTaggingInput, opts ...request.Option) (*GetObjectTaggingOutput, error) { + req, out := c.GetObjectTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetObjectTorrent = "GetObjectTorrent" // GetObjectTorrentRequest generates a "aws/request.Request" representing the // client's request for the GetObjectTorrent operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See GetObjectTorrent for usage and error information. +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error.
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetObjectTorrent method directly -// instead. +// See GetObjectTorrent for more information on using the GetObjectTorrent +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetObjectTorrentRequest method. // req, resp := client.GetObjectTorrentRequest(params) @@ -2487,27 +3181,41 @@ func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOutput, error) { req, out := c.GetObjectTorrentRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetObjectTorrentWithContext is the same as GetObjectTorrent with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectTorrent for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil, a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectTorrentWithContext(ctx aws.Context, input *GetObjectTorrentInput, opts ...request.Option) (*GetObjectTorrentOutput, error) { + req, out := c.GetObjectTorrentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opHeadBucket = "HeadBucket" // HeadBucketRequest generates a "aws/request.Request" representing the // client's request for the HeadBucket operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See HeadBucket for usage and error information. +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the HeadBucket method directly -// instead. +// See HeadBucket for more information on using the HeadBucket +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the HeadBucketRequest method.
// req, resp := client.HeadBucketRequest(params) @@ -2549,33 +3257,47 @@ func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, ou // API operation HeadBucket for usage and error information. // // Returned Error Codes: -// * NoSuchBucket +// * ErrCodeNoSuchBucket "NoSuchBucket" // The specified bucket does not exist. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) { req, out := c.HeadBucketRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// HeadBucketWithContext is the same as HeadBucket with the addition of +// the ability to pass a context and additional request options. +// +// See HeadBucket for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil, a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) HeadBucketWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.Option) (*HeadBucketOutput, error) { + req, out := c.HeadBucketRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opHeadObject = "HeadObject" // HeadObjectRequest generates a "aws/request.Request" representing the // client's request for the HeadObject operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See HeadObject for usage and error information. +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the HeadObject method directly -// instead. +// See HeadObject for more information on using the HeadObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the HeadObjectRequest method. // req, resp := client.HeadObjectRequest(params) @@ -2608,41 +3330,53 @@ func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, ou // object itself. This operation is useful if you're only interested in an object's // metadata. To use HEAD, you must have READ access to the object. // +// See http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#RESTErrorResponses +// for more information on returned errors. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation HeadObject for usage and error information.
-// -// Returned Error Codes: -// * NoSuchKey -// The specified key does not exist. -// // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) { req, out := c.HeadObjectRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// HeadObjectWithContext is the same as HeadObject with the addition of +// the ability to pass a context and additional request options. +// +// See HeadObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil, a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) HeadObjectWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.Option) (*HeadObjectOutput, error) { + req, out := c.HeadObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opListBucketAnalyticsConfigurations = "ListBucketAnalyticsConfigurations" // ListBucketAnalyticsConfigurationsRequest generates a "aws/request.Request" representing the // client's request for the ListBucketAnalyticsConfigurations operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See ListBucketAnalyticsConfigurations for usage and error information. +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListBucketAnalyticsConfigurations method directly -// instead. +// See ListBucketAnalyticsConfigurations for more information on using the ListBucketAnalyticsConfigurations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ListBucketAnalyticsConfigurationsRequest method. // req, resp := client.ListBucketAnalyticsConfigurationsRequest(params) @@ -2682,27 +3416,41 @@ func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalytics // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations func (c *S3) ListBucketAnalyticsConfigurations(input *ListBucketAnalyticsConfigurationsInput) (*ListBucketAnalyticsConfigurationsOutput, error) { req, out := c.ListBucketAnalyticsConfigurationsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ListBucketAnalyticsConfigurationsWithContext is the same as ListBucketAnalyticsConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See ListBucketAnalyticsConfigurations for details on how to use this API operation.
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil, a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketAnalyticsConfigurationsWithContext(ctx aws.Context, input *ListBucketAnalyticsConfigurationsInput, opts ...request.Option) (*ListBucketAnalyticsConfigurationsOutput, error) { + req, out := c.ListBucketAnalyticsConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opListBucketInventoryConfigurations = "ListBucketInventoryConfigurations" // ListBucketInventoryConfigurationsRequest generates a "aws/request.Request" representing the // client's request for the ListBucketInventoryConfigurations operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See ListBucketInventoryConfigurations for usage and error information. +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListBucketInventoryConfigurations method directly -// instead. +// See ListBucketInventoryConfigurations for more information on using the ListBucketInventoryConfigurations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ListBucketInventoryConfigurationsRequest method. // req, resp := client.ListBucketInventoryConfigurationsRequest(params) @@ -2742,27 +3490,41 @@ func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventory // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations func (c *S3) ListBucketInventoryConfigurations(input *ListBucketInventoryConfigurationsInput) (*ListBucketInventoryConfigurationsOutput, error) { req, out := c.ListBucketInventoryConfigurationsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ListBucketInventoryConfigurationsWithContext is the same as ListBucketInventoryConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See ListBucketInventoryConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil, a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts.
+func (c *S3) ListBucketInventoryConfigurationsWithContext(ctx aws.Context, input *ListBucketInventoryConfigurationsInput, opts ...request.Option) (*ListBucketInventoryConfigurationsOutput, error) { + req, out := c.ListBucketInventoryConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opListBucketMetricsConfigurations = "ListBucketMetricsConfigurations" // ListBucketMetricsConfigurationsRequest generates a "aws/request.Request" representing the // client's request for the ListBucketMetricsConfigurations operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See ListBucketMetricsConfigurations for usage and error information. +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListBucketMetricsConfigurations method directly -// instead. +// See ListBucketMetricsConfigurations for more information on using the ListBucketMetricsConfigurations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ListBucketMetricsConfigurationsRequest method. // req, resp := client.ListBucketMetricsConfigurationsRequest(params) @@ -2802,27 +3564,41 @@ func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConf // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations func (c *S3) ListBucketMetricsConfigurations(input *ListBucketMetricsConfigurationsInput) (*ListBucketMetricsConfigurationsOutput, error) { req, out := c.ListBucketMetricsConfigurationsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ListBucketMetricsConfigurationsWithContext is the same as ListBucketMetricsConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See ListBucketMetricsConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil, a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketMetricsConfigurationsWithContext(ctx aws.Context, input *ListBucketMetricsConfigurationsInput, opts ...request.Option) (*ListBucketMetricsConfigurationsOutput, error) { + req, out := c.ListBucketMetricsConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opListBuckets = "ListBuckets" // ListBucketsRequest generates a "aws/request.Request" representing the // client's request for the ListBuckets operation.
The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See ListBuckets for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListBuckets method directly -// instead. +// See ListBuckets for more information on using the ListBuckets +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ListBucketsRequest method. // req, resp := client.ListBucketsRequest(params) @@ -2862,27 +3638,41 @@ func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) { req, out := c.ListBucketsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ListBucketsWithContext is the same as ListBuckets with the addition of +// the ability to pass a context and additional request options. +// +// See ListBuckets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketsWithContext(ctx aws.Context, input *ListBucketsInput, opts ...request.Option) (*ListBucketsOutput, error) { + req, out := c.ListBucketsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opListMultipartUploads = "ListMultipartUploads" // ListMultipartUploadsRequest generates a "aws/request.Request" representing the // client's request for the ListMultipartUploads operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See ListMultipartUploads for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListMultipartUploads method directly -// instead. +// See ListMultipartUploads for more information on using the ListMultipartUploads +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ListMultipartUploadsRequest method. // req, resp := client.ListMultipartUploadsRequest(params) @@ -2928,8 +3718,23 @@ func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) { req, out := c.ListMultipartUploadsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ListMultipartUploadsWithContext is the same as ListMultipartUploads with the addition of +// the ability to pass a context and additional request options. +// +// See ListMultipartUploads for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil, a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListMultipartUploadsWithContext(ctx aws.Context, input *ListMultipartUploadsInput, opts ...request.Option) (*ListMultipartUploadsOutput, error) { + req, out := c.ListMultipartUploadsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } // ListMultipartUploadsPages iterates over the pages of a ListMultipartUploads operation, @@ -2949,31 +3754,55 @@ func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultip // return pageNum <= 3 // }) // -func (c *S3) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(p *ListMultipartUploadsOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.ListMultipartUploadsRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*ListMultipartUploadsOutput), lastPage) - }) +func (c *S3) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool) error { + return c.ListMultipartUploadsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListMultipartUploadsPagesWithContext same as ListMultipartUploadsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil, a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListMultipartUploadsPagesWithContext(ctx aws.Context, input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListMultipartUploadsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListMultipartUploadsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListMultipartUploadsOutput), !p.HasNextPage()) + } + return p.Err() }
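The paginators are now thin wrappers over the request.Pagination type, but the callback contract is unchanged: fn receives each page and returns false to stop iterating. A minimal sketch, not part of the patch ("my-bucket" is a hypothetical value):

```
// Count in-progress multipart uploads, visiting at most 10 pages.
pageNum, total := 0, 0
err := client.ListMultipartUploadsPages(&s3.ListMultipartUploadsInput{
	Bucket: aws.String("my-bucket"), // hypothetical
}, func(page *s3.ListMultipartUploadsOutput, lastPage bool) bool {
	pageNum++
	total += len(page.Uploads)
	return pageNum < 10 // returning false stops the iteration early
})
```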
const opListObjectVersions = "ListObjectVersions" // ListObjectVersionsRequest generates a "aws/request.Request" representing the // client's request for the ListObjectVersions operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See ListObjectVersions for usage and error information. +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListObjectVersions method directly -// instead. +// See ListObjectVersions for more information on using the ListObjectVersions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ListObjectVersionsRequest method. // req, resp := client.ListObjectVersionsRequest(params) @@ -3019,8 +3848,23 @@ func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *req // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) { req, out := c.ListObjectVersionsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ListObjectVersionsWithContext is the same as ListObjectVersions with the addition of +// the ability to pass a context and additional request options. +// +// See ListObjectVersions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil, a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectVersionsWithContext(ctx aws.Context, input *ListObjectVersionsInput, opts ...request.Option) (*ListObjectVersionsOutput, error) { + req, out := c.ListObjectVersionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...)
+ return out, req.Send() } // ListObjectVersionsPages iterates over the pages of a ListObjectVersions operation, @@ -3040,31 +3884,55 @@ func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVers // return pageNum <= 3 // }) // -func (c *S3) ListObjectVersionsPages(input *ListObjectVersionsInput, fn func(p *ListObjectVersionsOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.ListObjectVersionsRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*ListObjectVersionsOutput), lastPage) - }) +func (c *S3) ListObjectVersionsPages(input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool) error { + return c.ListObjectVersionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListObjectVersionsPagesWithContext same as ListObjectVersionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil, a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectVersionsPagesWithContext(ctx aws.Context, input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListObjectVersionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListObjectVersionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListObjectVersionsOutput), !p.HasNextPage()) + } + return p.Err() } const opListObjects = "ListObjects" // ListObjectsRequest generates a "aws/request.Request" representing the // client's request for the ListObjects operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See ListObjects for usage and error information. +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListObjects method directly -// instead. +// See ListObjects for more information on using the ListObjects +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ListObjectsRequest method. // req, resp := client.ListObjectsRequest(params) @@ -3111,14 +3979,29 @@ func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, // API operation ListObjects for usage and error information.
 //
 // Returned Error Codes:
-//   * NoSuchBucket
+//   * ErrCodeNoSuchBucket "NoSuchBucket"
 //   The specified bucket does not exist.
 //
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects
 func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) {
 	req, out := c.ListObjectsRequest(input)
-	err := req.Send()
-	return out, err
+	return out, req.Send()
+}
+
+// ListObjectsWithContext is the same as ListObjects with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListObjects for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListObjectsWithContext(ctx aws.Context, input *ListObjectsInput, opts ...request.Option) (*ListObjectsOutput, error) {
+	req, out := c.ListObjectsRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
 }

 // ListObjectsPages iterates over the pages of a ListObjects operation,
@@ -3138,31 +4021,55 @@ func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) {
 //            return pageNum <= 3
 //        })
 //
-func (c *S3) ListObjectsPages(input *ListObjectsInput, fn func(p *ListObjectsOutput, lastPage bool) (shouldContinue bool)) error {
-	page, _ := c.ListObjectsRequest(input)
-	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
-	return page.EachPage(func(p interface{}, lastPage bool) bool {
-		return fn(p.(*ListObjectsOutput), lastPage)
-	})
+func (c *S3) ListObjectsPages(input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool) error {
+	return c.ListObjectsPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListObjectsPagesWithContext is the same as ListObjectsPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListObjectsPagesWithContext(ctx aws.Context, input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool, opts ...request.Option) error {
+	p := request.Pagination{
+		NewRequest: func() (*request.Request, error) {
+			var inCpy *ListObjectsInput
+			if input != nil {
+				tmp := *input
+				inCpy = &tmp
+			}
+			req, _ := c.ListObjectsRequest(inCpy)
+			req.SetContext(ctx)
+			req.ApplyOptions(opts...)
+			return req, nil
+		},
+	}
+
+	cont := true
+	for p.Next() && cont {
+		cont = fn(p.Page().(*ListObjectsOutput), !p.HasNextPage())
+	}
+	return p.Err()
 }

 const opListObjectsV2 = "ListObjectsV2"

 // ListObjectsV2Request generates a "aws/request.Request" representing the
 // client's request for the ListObjectsV2 operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See ListObjectsV2 for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the ListObjectsV2 method directly
-// instead.
+// See ListObjectsV2 for more information on using the ListObjectsV2
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the ListObjectsV2Request method.
 //    req, resp := client.ListObjectsV2Request(params)
@@ -3210,14 +4117,29 @@ func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Reque
 // API operation ListObjectsV2 for usage and error information.
 //
 // Returned Error Codes:
-//   * NoSuchBucket
+//   * ErrCodeNoSuchBucket "NoSuchBucket"
 //   The specified bucket does not exist.
 //
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2
 func (c *S3) ListObjectsV2(input *ListObjectsV2Input) (*ListObjectsV2Output, error) {
 	req, out := c.ListObjectsV2Request(input)
-	err := req.Send()
-	return out, err
+	return out, req.Send()
+}
+
+// ListObjectsV2WithContext is the same as ListObjectsV2 with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListObjectsV2 for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListObjectsV2WithContext(ctx aws.Context, input *ListObjectsV2Input, opts ...request.Option) (*ListObjectsV2Output, error) {
+	req, out := c.ListObjectsV2Request(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
 }

 // ListObjectsV2Pages iterates over the pages of a ListObjectsV2 operation,
@@ -3237,31 +4159,55 @@ func (c *S3) ListObjectsV2(input *ListObjectsV2Input) (*ListObjectsV2Output, err
 //            return pageNum <= 3
 //        })
 //
-func (c *S3) ListObjectsV2Pages(input *ListObjectsV2Input, fn func(p *ListObjectsV2Output, lastPage bool) (shouldContinue bool)) error {
-	page, _ := c.ListObjectsV2Request(input)
-	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
-	return page.EachPage(func(p interface{}, lastPage bool) bool {
-		return fn(p.(*ListObjectsV2Output), lastPage)
-	})
+func (c *S3) ListObjectsV2Pages(input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool) error {
+	return c.ListObjectsV2PagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListObjectsV2PagesWithContext is the same as ListObjectsV2Pages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
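The simplified paginator signature above (`fn func(*ListObjectsV2Output, bool) bool`) replaces the older named-parameter form. A short sketch of paging with it, with the bucket name hypothetical and the client assumed to be configured elsewhere:

```
package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// listAllKeys prints every key in the bucket, one page at a time.
func listAllKeys(svc *s3.S3, bucket string) error {
	return svc.ListObjectsV2Pages(&s3.ListObjectsV2Input{Bucket: aws.String(bucket)},
		func(page *s3.ListObjectsV2Output, lastPage bool) bool {
			for _, obj := range page.Contents {
				fmt.Println(aws.StringValue(obj.Key))
			}
			return true // returning false would stop iteration early
		})
}
```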
+func (c *S3) ListObjectsV2PagesWithContext(ctx aws.Context, input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool, opts ...request.Option) error {
+	p := request.Pagination{
+		NewRequest: func() (*request.Request, error) {
+			var inCpy *ListObjectsV2Input
+			if input != nil {
+				tmp := *input
+				inCpy = &tmp
+			}
+			req, _ := c.ListObjectsV2Request(inCpy)
+			req.SetContext(ctx)
+			req.ApplyOptions(opts...)
+			return req, nil
+		},
+	}
+
+	cont := true
+	for p.Next() && cont {
+		cont = fn(p.Page().(*ListObjectsV2Output), !p.HasNextPage())
+	}
+	return p.Err()
+}

 const opListParts = "ListParts"

 // ListPartsRequest generates a "aws/request.Request" representing the
 // client's request for the ListParts operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See ListParts for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the ListParts method directly
-// instead.
+// See ListParts for more information on using the ListParts
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the ListPartsRequest method.
 //    req, resp := client.ListPartsRequest(params)
@@ -3307,8 +4253,23 @@ func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, outp
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts
 func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) {
 	req, out := c.ListPartsRequest(input)
-	err := req.Send()
-	return out, err
+	return out, req.Send()
+}
+
+// ListPartsWithContext is the same as ListParts with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListParts for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListPartsWithContext(ctx aws.Context, input *ListPartsInput, opts ...request.Option) (*ListPartsOutput, error) {
+	req, out := c.ListPartsRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
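The `request.Pagination` value that the generated `...PagesWithContext` bodies construct above can also be driven by hand, using the same `Next`, `Page`, and `Err` methods shown in this hunk. A sketch under the same assumptions (configured client, hypothetical bucket):

```
package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/service/s3"
)

// printPages walks a bucket page by page by driving request.Pagination directly.
func printPages(svc *s3.S3, bucket string) error {
	p := request.Pagination{
		// NewRequest returns a fresh request for each page; the generated
		// helpers copy the caller's input here so it is never mutated.
		NewRequest: func() (*request.Request, error) {
			req, _ := svc.ListObjectsV2Request(&s3.ListObjectsV2Input{
				Bucket: aws.String(bucket),
			})
			return req, nil
		},
	}
	for p.Next() {
		page := p.Page().(*s3.ListObjectsV2Output)
		fmt.Println("page with", len(page.Contents), "objects")
	}
	return p.Err()
}
```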
+	return out, req.Send()
 }

 // ListPartsPages iterates over the pages of a ListParts operation,
@@ -3328,31 +4289,55 @@ func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) {
 //            return pageNum <= 3
 //        })
 //
-func (c *S3) ListPartsPages(input *ListPartsInput, fn func(p *ListPartsOutput, lastPage bool) (shouldContinue bool)) error {
-	page, _ := c.ListPartsRequest(input)
-	page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
-	return page.EachPage(func(p interface{}, lastPage bool) bool {
-		return fn(p.(*ListPartsOutput), lastPage)
-	})
+func (c *S3) ListPartsPages(input *ListPartsInput, fn func(*ListPartsOutput, bool) bool) error {
+	return c.ListPartsPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListPartsPagesWithContext is the same as ListPartsPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListPartsPagesWithContext(ctx aws.Context, input *ListPartsInput, fn func(*ListPartsOutput, bool) bool, opts ...request.Option) error {
+	p := request.Pagination{
+		NewRequest: func() (*request.Request, error) {
+			var inCpy *ListPartsInput
+			if input != nil {
+				tmp := *input
+				inCpy = &tmp
+			}
+			req, _ := c.ListPartsRequest(inCpy)
+			req.SetContext(ctx)
+			req.ApplyOptions(opts...)
+			return req, nil
+		},
+	}
+
+	cont := true
+	for p.Next() && cont {
+		cont = fn(p.Page().(*ListPartsOutput), !p.HasNextPage())
+	}
+	return p.Err()
 }

 const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration"

 // PutBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the
 // client's request for the PutBucketAccelerateConfiguration operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See PutBucketAccelerateConfiguration for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketAccelerateConfiguration method directly
-// instead.
+// See PutBucketAccelerateConfiguration for more information on using the PutBucketAccelerateConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the PutBucketAccelerateConfigurationRequest method.
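The `...PagesWithContext` variants accept any standard `context.Context` (which satisfies `aws.Context`), so a deadline can bound an entire multi-page walk. A sketch with hypothetical bucket, key, and upload ID values:

```
package example

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// countParts totals the parts of a multipart upload, giving up after 30s.
func countParts(svc *s3.S3, bucket, key, uploadID string) (int, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	total := 0
	err := svc.ListPartsPagesWithContext(ctx, &s3.ListPartsInput{
		Bucket:   aws.String(bucket),
		Key:      aws.String(key),
		UploadId: aws.String(uploadID),
	}, func(page *s3.ListPartsOutput, lastPage bool) bool {
		total += len(page.Parts)
		return true
	})
	return total, err
}
```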
 //    req, resp := client.PutBucketAccelerateConfigurationRequest(params)
@@ -3394,27 +4379,41 @@ func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateC
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration
 func (c *S3) PutBucketAccelerateConfiguration(input *PutBucketAccelerateConfigurationInput) (*PutBucketAccelerateConfigurationOutput, error) {
 	req, out := c.PutBucketAccelerateConfigurationRequest(input)
-	err := req.Send()
-	return out, err
+	return out, req.Send()
+}
+
+// PutBucketAccelerateConfigurationWithContext is the same as PutBucketAccelerateConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketAccelerateConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketAccelerateConfigurationWithContext(ctx aws.Context, input *PutBucketAccelerateConfigurationInput, opts ...request.Option) (*PutBucketAccelerateConfigurationOutput, error) {
+	req, out := c.PutBucketAccelerateConfigurationRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
 }

 const opPutBucketAcl = "PutBucketAcl"

 // PutBucketAclRequest generates a "aws/request.Request" representing the
 // client's request for the PutBucketAcl operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See PutBucketAcl for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketAcl method directly
-// instead.
+// See PutBucketAcl for more information on using the PutBucketAcl
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the PutBucketAclRequest method.
 //    req, resp := client.PutBucketAclRequest(params)
@@ -3456,27 +4455,41 @@ func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl
 func (c *S3) PutBucketAcl(input *PutBucketAclInput) (*PutBucketAclOutput, error) {
 	req, out := c.PutBucketAclRequest(input)
-	err := req.Send()
-	return out, err
+	return out, req.Send()
+}
+
+// PutBucketAclWithContext is the same as PutBucketAcl with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketAcl for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketAclWithContext(ctx aws.Context, input *PutBucketAclInput, opts ...request.Option) (*PutBucketAclOutput, error) {
+	req, out := c.PutBucketAclRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
 }

 const opPutBucketAnalyticsConfiguration = "PutBucketAnalyticsConfiguration"

 // PutBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the
 // client's request for the PutBucketAnalyticsConfiguration operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See PutBucketAnalyticsConfiguration for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketAnalyticsConfiguration method directly
-// instead.
+// See PutBucketAnalyticsConfiguration for more information on using the PutBucketAnalyticsConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the PutBucketAnalyticsConfigurationRequest method.
 //    req, resp := client.PutBucketAnalyticsConfigurationRequest(params)
@@ -3519,27 +4532,41 @@ func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsCon
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration
 func (c *S3) PutBucketAnalyticsConfiguration(input *PutBucketAnalyticsConfigurationInput) (*PutBucketAnalyticsConfigurationOutput, error) {
 	req, out := c.PutBucketAnalyticsConfigurationRequest(input)
-	err := req.Send()
-	return out, err
+	return out, req.Send()
+}
+
+// PutBucketAnalyticsConfigurationWithContext is the same as PutBucketAnalyticsConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketAnalyticsConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *PutBucketAnalyticsConfigurationInput, opts ...request.Option) (*PutBucketAnalyticsConfigurationOutput, error) {
+	req, out := c.PutBucketAnalyticsConfigurationRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
 }

 const opPutBucketCors = "PutBucketCors"

 // PutBucketCorsRequest generates a "aws/request.Request" representing the
 // client's request for the PutBucketCors operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See PutBucketCors for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketCors method directly
-// instead.
+// See PutBucketCors for more information on using the PutBucketCors
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the PutBucketCorsRequest method.
 //    req, resp := client.PutBucketCorsRequest(params)
@@ -3581,27 +4608,118 @@ func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Reque
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors
 func (c *S3) PutBucketCors(input *PutBucketCorsInput) (*PutBucketCorsOutput, error) {
 	req, out := c.PutBucketCorsRequest(input)
-	err := req.Send()
-	return out, err
+	return out, req.Send()
+}
+
+// PutBucketCorsWithContext is the same as PutBucketCors with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketCors for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketCorsWithContext(ctx aws.Context, input *PutBucketCorsInput, opts ...request.Option) (*PutBucketCorsOutput, error) {
+	req, out := c.PutBucketCorsRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opPutBucketEncryption = "PutBucketEncryption"
+
+// PutBucketEncryptionRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketEncryption operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketEncryption for more information on using the PutBucketEncryption
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the PutBucketEncryptionRequest method.
+//    req, resp := client.PutBucketEncryptionRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryption
+func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *request.Request, output *PutBucketEncryptionOutput) {
+	op := &request.Operation{
+		Name:       opPutBucketEncryption,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/{Bucket}?encryption",
+	}
+
+	if input == nil {
+		input = &PutBucketEncryptionInput{}
+	}
+
+	output = &PutBucketEncryptionOutput{}
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	return
+}
+
+// PutBucketEncryption API operation for Amazon Simple Storage Service.
+//
+// Creates a new server-side encryption configuration (or replaces an existing
+// one, if present).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketEncryption for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryption
+func (c *S3) PutBucketEncryption(input *PutBucketEncryptionInput) (*PutBucketEncryptionOutput, error) {
+	req, out := c.PutBucketEncryptionRequest(input)
+	return out, req.Send()
+}
+
+// PutBucketEncryptionWithContext is the same as PutBucketEncryption with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketEncryption for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketEncryptionWithContext(ctx aws.Context, input *PutBucketEncryptionInput, opts ...request.Option) (*PutBucketEncryptionOutput, error) {
+	req, out := c.PutBucketEncryptionRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
 }

 const opPutBucketInventoryConfiguration = "PutBucketInventoryConfiguration"

 // PutBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the
 // client's request for the PutBucketInventoryConfiguration operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See PutBucketInventoryConfiguration for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketInventoryConfiguration method directly
-// instead.
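PutBucketEncryption is a newly added operation in this hunk. Its input types are defined further down in the regenerated file, outside this excerpt; assuming the usual shape of the generated S3 types (the field and constant names below come from the regenerated SDK, not from this hunk, so treat them as unverified here), default bucket encryption could be enabled like this:

```
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// enableDefaultEncryption turns on SSE-S3 (AES256) default encryption
// for the named bucket.
func enableDefaultEncryption(svc *s3.S3, bucket string) error {
	_, err := svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{
		Bucket: aws.String(bucket),
		ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
			Rules: []*s3.ServerSideEncryptionRule{{
				ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
					SSEAlgorithm: aws.String("AES256"),
				},
			}},
		},
	})
	return err
}
```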
+// See PutBucketInventoryConfiguration for more information on using the PutBucketInventoryConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the PutBucketInventoryConfigurationRequest method.
 //    req, resp := client.PutBucketInventoryConfigurationRequest(params)
@@ -3644,27 +4762,41 @@ func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryCon
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration
 func (c *S3) PutBucketInventoryConfiguration(input *PutBucketInventoryConfigurationInput) (*PutBucketInventoryConfigurationOutput, error) {
 	req, out := c.PutBucketInventoryConfigurationRequest(input)
-	err := req.Send()
-	return out, err
+	return out, req.Send()
+}
+
+// PutBucketInventoryConfigurationWithContext is the same as PutBucketInventoryConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketInventoryConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketInventoryConfigurationWithContext(ctx aws.Context, input *PutBucketInventoryConfigurationInput, opts ...request.Option) (*PutBucketInventoryConfigurationOutput, error) {
+	req, out := c.PutBucketInventoryConfigurationRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
 }

 const opPutBucketLifecycle = "PutBucketLifecycle"

 // PutBucketLifecycleRequest generates a "aws/request.Request" representing the
 // client's request for the PutBucketLifecycle operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See PutBucketLifecycle for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketLifecycle method directly
-// instead.
+// See PutBucketLifecycle for more information on using the PutBucketLifecycle
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the PutBucketLifecycleRequest method.
 //    req, resp := client.PutBucketLifecycleRequest(params)
@@ -3709,27 +4841,41 @@ func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *req
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle
 func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) {
 	req, out := c.PutBucketLifecycleRequest(input)
-	err := req.Send()
-	return out, err
+	return out, req.Send()
+}
+
+// PutBucketLifecycleWithContext is the same as PutBucketLifecycle with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketLifecycle for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketLifecycleWithContext(ctx aws.Context, input *PutBucketLifecycleInput, opts ...request.Option) (*PutBucketLifecycleOutput, error) {
	req, out := c.PutBucketLifecycleRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
 }

 const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration"

 // PutBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the
 // client's request for the PutBucketLifecycleConfiguration operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See PutBucketLifecycleConfiguration for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketLifecycleConfiguration method directly
-// instead.
+// See PutBucketLifecycleConfiguration for more information on using the PutBucketLifecycleConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the PutBucketLifecycleConfigurationRequest method.
 //    req, resp := client.PutBucketLifecycleConfigurationRequest(params)
@@ -3772,27 +4918,41 @@ func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleCon
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration
 func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) {
 	req, out := c.PutBucketLifecycleConfigurationRequest(input)
-	err := req.Send()
-	return out, err
+	return out, req.Send()
+}
+
+// PutBucketLifecycleConfigurationWithContext is the same as PutBucketLifecycleConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketLifecycleConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketLifecycleConfigurationWithContext(ctx aws.Context, input *PutBucketLifecycleConfigurationInput, opts ...request.Option) (*PutBucketLifecycleConfigurationOutput, error) {
+	req, out := c.PutBucketLifecycleConfigurationRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
 }

 const opPutBucketLogging = "PutBucketLogging"

 // PutBucketLoggingRequest generates a "aws/request.Request" representing the
 // client's request for the PutBucketLogging operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See PutBucketLogging for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketLogging method directly
-// instead.
+// See PutBucketLogging for more information on using the PutBucketLogging
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the PutBucketLoggingRequest method.
 //    req, resp := client.PutBucketLoggingRequest(params)
@@ -3836,27 +4996,41 @@ func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging
 func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) {
 	req, out := c.PutBucketLoggingRequest(input)
-	err := req.Send()
-	return out, err
+	return out, req.Send()
+}
+
+// PutBucketLoggingWithContext is the same as PutBucketLogging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketLogging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketLoggingWithContext(ctx aws.Context, input *PutBucketLoggingInput, opts ...request.Option) (*PutBucketLoggingOutput, error) {
+	req, out := c.PutBucketLoggingRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
 }

 const opPutBucketMetricsConfiguration = "PutBucketMetricsConfiguration"

 // PutBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the
 // client's request for the PutBucketMetricsConfiguration operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See PutBucketMetricsConfiguration for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketMetricsConfiguration method directly
-// instead.
+// See PutBucketMetricsConfiguration for more information on using the PutBucketMetricsConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the PutBucketMetricsConfigurationRequest method.
 //    req, resp := client.PutBucketMetricsConfigurationRequest(params)
@@ -3899,27 +5073,41 @@ func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigu
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration
 func (c *S3) PutBucketMetricsConfiguration(input *PutBucketMetricsConfigurationInput) (*PutBucketMetricsConfigurationOutput, error) {
 	req, out := c.PutBucketMetricsConfigurationRequest(input)
-	err := req.Send()
-	return out, err
+	return out, req.Send()
+}
+
+// PutBucketMetricsConfigurationWithContext is the same as PutBucketMetricsConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketMetricsConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketMetricsConfigurationWithContext(ctx aws.Context, input *PutBucketMetricsConfigurationInput, opts ...request.Option) (*PutBucketMetricsConfigurationOutput, error) {
+	req, out := c.PutBucketMetricsConfigurationRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
 }

 const opPutBucketNotification = "PutBucketNotification"

 // PutBucketNotificationRequest generates a "aws/request.Request" representing the
 // client's request for the PutBucketNotification operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See PutBucketNotification for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketNotification method directly
-// instead.
+// See PutBucketNotification for more information on using the PutBucketNotification
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the PutBucketNotificationRequest method.
 //    req, resp := client.PutBucketNotificationRequest(params)
@@ -3964,27 +5152,41 @@ func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (re
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification
 func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) {
 	req, out := c.PutBucketNotificationRequest(input)
-	err := req.Send()
-	return out, err
+	return out, req.Send()
+}
+
+// PutBucketNotificationWithContext is the same as PutBucketNotification with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketNotification for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketNotificationWithContext(ctx aws.Context, input *PutBucketNotificationInput, opts ...request.Option) (*PutBucketNotificationOutput, error) {
+	req, out := c.PutBucketNotificationRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
 }

 const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration"

 // PutBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the
 // client's request for the PutBucketNotificationConfiguration operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See PutBucketNotificationConfiguration for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketNotificationConfiguration method directly
-// instead.
+// See PutBucketNotificationConfiguration for more information on using the PutBucketNotificationConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the PutBucketNotificationConfigurationRequest method.
 //    req, resp := client.PutBucketNotificationConfigurationRequest(params)
@@ -4026,27 +5228,41 @@ func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificat
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration
 func (c *S3) PutBucketNotificationConfiguration(input *PutBucketNotificationConfigurationInput) (*PutBucketNotificationConfigurationOutput, error) {
 	req, out := c.PutBucketNotificationConfigurationRequest(input)
-	err := req.Send()
-	return out, err
+	return out, req.Send()
+}
+
+// PutBucketNotificationConfigurationWithContext is the same as PutBucketNotificationConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketNotificationConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketNotificationConfigurationWithContext(ctx aws.Context, input *PutBucketNotificationConfigurationInput, opts ...request.Option) (*PutBucketNotificationConfigurationOutput, error) {
+	req, out := c.PutBucketNotificationConfigurationRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
 }

 const opPutBucketPolicy = "PutBucketPolicy"

 // PutBucketPolicyRequest generates a "aws/request.Request" representing the
 // client's request for the PutBucketPolicy operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See PutBucketPolicy for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketPolicy method directly
-// instead.
+// See PutBucketPolicy for more information on using the PutBucketPolicy
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the PutBucketPolicyRequest method.
 //    req, resp := client.PutBucketPolicyRequest(params)
@@ -4089,27 +5305,41 @@ func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.R
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy
 func (c *S3) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) {
 	req, out := c.PutBucketPolicyRequest(input)
-	err := req.Send()
-	return out, err
+	return out, req.Send()
+}
+
+// PutBucketPolicyWithContext is the same as PutBucketPolicy with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketPolicy for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketPolicyWithContext(ctx aws.Context, input *PutBucketPolicyInput, opts ...request.Option) (*PutBucketPolicyOutput, error) {
+	req, out := c.PutBucketPolicyRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
 }

 const opPutBucketReplication = "PutBucketReplication"

 // PutBucketReplicationRequest generates a "aws/request.Request" representing the
 // client's request for the PutBucketReplication operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See PutBucketReplication for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketReplication method directly
-// instead.
+// See PutBucketReplication for more information on using the PutBucketReplication
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the PutBucketReplicationRequest method.
 //    req, resp := client.PutBucketReplicationRequest(params)
@@ -4152,27 +5382,41 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication
 func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) {
 	req, out := c.PutBucketReplicationRequest(input)
-	err := req.Send()
-	return out, err
+	return out, req.Send()
+}
+
+// PutBucketReplicationWithContext is the same as PutBucketReplication with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketReplication for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketReplicationWithContext(ctx aws.Context, input *PutBucketReplicationInput, opts ...request.Option) (*PutBucketReplicationOutput, error) {
+	req, out := c.PutBucketReplicationRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
 }

 const opPutBucketRequestPayment = "PutBucketRequestPayment"

 // PutBucketRequestPaymentRequest generates a "aws/request.Request" representing the
 // client's request for the PutBucketRequestPayment operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See PutBucketRequestPayment for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketRequestPayment method directly
-// instead.
+// See PutBucketRequestPayment for more information on using the PutBucketRequestPayment
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the PutBucketRequestPaymentRequest method.
 //    req, resp := client.PutBucketRequestPaymentRequest(params)
@@ -4218,27 +5462,41 @@ func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput)
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment
 func (c *S3) PutBucketRequestPayment(input *PutBucketRequestPaymentInput) (*PutBucketRequestPaymentOutput, error) {
 	req, out := c.PutBucketRequestPaymentRequest(input)
-	err := req.Send()
-	return out, err
+	return out, req.Send()
+}
+
+// PutBucketRequestPaymentWithContext is the same as PutBucketRequestPayment with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketRequestPayment for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketRequestPaymentWithContext(ctx aws.Context, input *PutBucketRequestPaymentInput, opts ...request.Option) (*PutBucketRequestPaymentOutput, error) {
+	req, out := c.PutBucketRequestPaymentRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
 }

 const opPutBucketTagging = "PutBucketTagging"

 // PutBucketTaggingRequest generates a "aws/request.Request" representing the
 // client's request for the PutBucketTagging operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See PutBucketTagging for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketTagging method directly
-// instead.
+// See PutBucketTagging for more information on using the PutBucketTagging
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the PutBucketTaggingRequest method.
 //    req, resp := client.PutBucketTaggingRequest(params)
@@ -4280,27 +5538,41 @@ func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging
 func (c *S3) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOutput, error) {
 	req, out := c.PutBucketTaggingRequest(input)
-	err := req.Send()
-	return out, err
+	return out, req.Send()
+}
+
+// PutBucketTaggingWithContext is the same as PutBucketTagging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketTagging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketTaggingWithContext(ctx aws.Context, input *PutBucketTaggingInput, opts ...request.Option) (*PutBucketTaggingOutput, error) {
+	req, out := c.PutBucketTaggingRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
 }

 const opPutBucketVersioning = "PutBucketVersioning"

 // PutBucketVersioningRequest generates a "aws/request.Request" representing the
 // client's request for the PutBucketVersioning operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
//
-// See PutBucketVersioning for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketVersioning method directly
-// instead.
+// See PutBucketVersioning for more information on using the PutBucketVersioning
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
//
//    // Example sending a request using the PutBucketVersioningRequest method.
//    req, resp := client.PutBucketVersioningRequest(params)
@@ -4343,27 +5615,41 @@ func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *r
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning
func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVersioningOutput, error) {
	req, out := c.PutBucketVersioningRequest(input)
-	err := req.Send()
-	return out, err
+	return out, req.Send()
+}
+
+// PutBucketVersioningWithContext is the same as PutBucketVersioning with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketVersioning for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketVersioningWithContext(ctx aws.Context, input *PutBucketVersioningInput, opts ...request.Option) (*PutBucketVersioningOutput, error) {
+	req, out := c.PutBucketVersioningRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
}

const opPutBucketWebsite = "PutBucketWebsite"

// PutBucketWebsiteRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketWebsite operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
//
-// See PutBucketWebsite for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketWebsite method directly
-// instead.
+// See PutBucketWebsite for more information on using the PutBucketWebsite
+// API call, and error handling.
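+//
+// A hedged sketch of that request-object pattern, injecting one custom
+// header before sending (the header name and client value "svc" are
+// illustrative assumptions):
+//
+//    req, out := svc.PutBucketWebsiteRequest(input)
+//    req.HTTPRequest.Header.Set("X-Example-Trace", "demo")
+//    if err := req.Send(); err == nil {
+//        fmt.Println(out)
+//    }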
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
//
//    // Example sending a request using the PutBucketWebsiteRequest method.
//    req, resp := client.PutBucketWebsiteRequest(params)
@@ -4405,27 +5691,41 @@ func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite
func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOutput, error) {
	req, out := c.PutBucketWebsiteRequest(input)
-	err := req.Send()
-	return out, err
+	return out, req.Send()
+}
+
+// PutBucketWebsiteWithContext is the same as PutBucketWebsite with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketWebsite for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketWebsiteWithContext(ctx aws.Context, input *PutBucketWebsiteInput, opts ...request.Option) (*PutBucketWebsiteOutput, error) {
+	req, out := c.PutBucketWebsiteRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
}

const opPutObject = "PutObject"

// PutObjectRequest generates a "aws/request.Request" representing the
// client's request for the PutObject operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
//
-// See PutObject for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutObject method directly
-// instead.
+// See PutObject for more information on using the PutObject
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
//
//    // Example sending a request using the PutObjectRequest method.
//    req, resp := client.PutObjectRequest(params)
@@ -4465,27 +5765,41 @@ func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, outp
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject
func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) {
	req, out := c.PutObjectRequest(input)
-	err := req.Send()
-	return out, err
+	return out, req.Send()
+}
+
+// PutObjectWithContext is the same as PutObject with the addition of
+// the ability to pass a context and additional request options.
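+//
+// A minimal sketch, assuming an existing client "svc" and placeholder
+// bucket, key, and body values:
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
+//    defer cancel()
+//    _, err := svc.PutObjectWithContext(ctx, &PutObjectInput{
+//        Bucket: aws.String("mybucket"),
+//        Key:    aws.String("mykey"),
+//        Body:   strings.NewReader("hello"),
+//    })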
+//
+// See PutObject for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutObjectWithContext(ctx aws.Context, input *PutObjectInput, opts ...request.Option) (*PutObjectOutput, error) {
+	req, out := c.PutObjectRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
}

const opPutObjectAcl = "PutObjectAcl"

// PutObjectAclRequest generates a "aws/request.Request" representing the
// client's request for the PutObjectAcl operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
//
-// See PutObjectAcl for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutObjectAcl method directly
-// instead.
+// See PutObjectAcl for more information on using the PutObjectAcl
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
//
//    // Example sending a request using the PutObjectAclRequest method.
//    req, resp := client.PutObjectAclRequest(params)
@@ -4525,33 +5839,47 @@ func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request
// API operation PutObjectAcl for usage and error information.
//
// Returned Error Codes:
-//   * NoSuchKey
+//   * ErrCodeNoSuchKey "NoSuchKey"
//   The specified key does not exist.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl
func (c *S3) PutObjectAcl(input *PutObjectAclInput) (*PutObjectAclOutput, error) {
	req, out := c.PutObjectAclRequest(input)
-	err := req.Send()
-	return out, err
+	return out, req.Send()
+}
+
+// PutObjectAclWithContext is the same as PutObjectAcl with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutObjectAcl for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutObjectAclWithContext(ctx aws.Context, input *PutObjectAclInput, opts ...request.Option) (*PutObjectAclOutput, error) {
+	req, out := c.PutObjectAclRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
}

const opPutObjectTagging = "PutObjectTagging"

// PutObjectTaggingRequest generates a "aws/request.Request" representing the
// client's request for the PutObjectTagging operation. The "output" return
The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See PutObjectTagging for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutObjectTagging method directly -// instead. +// See PutObjectTagging for more information on using the PutObjectTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutObjectTaggingRequest method. // req, resp := client.PutObjectTaggingRequest(params) @@ -4591,27 +5919,41 @@ func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging func (c *S3) PutObjectTagging(input *PutObjectTaggingInput) (*PutObjectTaggingOutput, error) { req, out := c.PutObjectTaggingRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// PutObjectTaggingWithContext is the same as PutObjectTagging with the addition of +// the ability to pass a context and additional request options. +// +// See PutObjectTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectTaggingWithContext(ctx aws.Context, input *PutObjectTaggingInput, opts ...request.Option) (*PutObjectTaggingOutput, error) { + req, out := c.PutObjectTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opRestoreObject = "RestoreObject" // RestoreObjectRequest generates a "aws/request.Request" representing the // client's request for the RestoreObject operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See RestoreObject for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the RestoreObject method directly -// instead. 
+// See RestoreObject for more information on using the RestoreObject
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
//
//    // Example sending a request using the RestoreObjectRequest method.
//    req, resp := client.RestoreObjectRequest(params)
@@ -4650,33 +5992,47 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque
// API operation RestoreObject for usage and error information.
//
// Returned Error Codes:
-//   * ObjectAlreadyInActiveTierError
+//   * ErrCodeObjectAlreadyInActiveTierError "ObjectAlreadyInActiveTierError"
//   This operation is not allowed against this storage tier
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject
func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) {
	req, out := c.RestoreObjectRequest(input)
-	err := req.Send()
-	return out, err
+	return out, req.Send()
+}
+
+// RestoreObjectWithContext is the same as RestoreObject with the addition of
+// the ability to pass a context and additional request options.
+//
+// See RestoreObject for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) RestoreObjectWithContext(ctx aws.Context, input *RestoreObjectInput, opts ...request.Option) (*RestoreObjectOutput, error) {
+	req, out := c.RestoreObjectRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
}

const opUploadPart = "UploadPart"

// UploadPartRequest generates a "aws/request.Request" representing the
// client's request for the UploadPart operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
//
-// See UploadPart for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the UploadPart method directly
-// instead.
+// See UploadPart for more information on using the UploadPart
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
//
//    // Example sending a request using the UploadPartRequest method.
//    req, resp := client.UploadPartRequest(params)
@@ -4722,27 +6078,41 @@ func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, ou
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart
func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) {
	req, out := c.UploadPartRequest(input)
-	err := req.Send()
-	return out, err
+	return out, req.Send()
+}
+
+// UploadPartWithContext is the same as UploadPart with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UploadPart for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) UploadPartWithContext(ctx aws.Context, input *UploadPartInput, opts ...request.Option) (*UploadPartOutput, error) {
+	req, out := c.UploadPartRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
}

const opUploadPartCopy = "UploadPartCopy"

// UploadPartCopyRequest generates a "aws/request.Request" representing the
// client's request for the UploadPartCopy operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
//
-// See UploadPartCopy for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the UploadPartCopy method directly
-// instead.
+// See UploadPartCopy for more information on using the UploadPartCopy
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
//
//    // Example sending a request using the UploadPartCopyRequest method.
//    req, resp := client.UploadPartCopyRequest(params)
@@ -4782,8 +6152,23 @@ func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Req
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy
func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) {
	req, out := c.UploadPartCopyRequest(input)
-	err := req.Send()
-	return out, err
+	return out, req.Send()
+}
+
+// UploadPartCopyWithContext is the same as UploadPartCopy with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UploadPartCopy for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
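+//
+// Cancellation surfaces as an error from Send. One hedged way to detect it,
+// assuming this SDK version exposes request.CanceledErrorCode and the awserr
+// package is imported:
+//
+//    _, err := svc.UploadPartCopyWithContext(ctx, input)
+//    if aerr, ok := err.(awserr.Error); ok && aerr.Code() == request.CanceledErrorCode {
+//        // the supplied context was canceled or its deadline passed
+//    }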
+func (c *S3) UploadPartCopyWithContext(ctx aws.Context, input *UploadPartCopyInput, opts ...request.Option) (*UploadPartCopyOutput, error) { + req, out := c.UploadPartCopyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } // Specifies the days since the initiation of an Incomplete Multipart Upload @@ -4871,6 +6256,13 @@ func (s *AbortMultipartUploadInput) SetBucket(v string) *AbortMultipartUploadInp return s } +func (s *AbortMultipartUploadInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetKey sets the Key field's value. func (s *AbortMultipartUploadInput) SetKey(v string) *AbortMultipartUploadInput { s.Key = &v @@ -4990,6 +6382,46 @@ func (s *AccessControlPolicy) SetOwner(v *Owner) *AccessControlPolicy { return s } +// Container for information regarding the access control for replicas. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AccessControlTranslation +type AccessControlTranslation struct { + _ struct{} `type:"structure"` + + // The override value for the owner of the replica object. + // + // Owner is a required field + Owner *string `type:"string" required:"true" enum:"OwnerOverride"` +} + +// String returns the string representation +func (s AccessControlTranslation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessControlTranslation) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AccessControlTranslation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AccessControlTranslation"} + if s.Owner == nil { + invalidParams.Add(request.NewErrParamRequired("Owner")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetOwner sets the Owner field's value. +func (s *AccessControlTranslation) SetOwner(v string) *AccessControlTranslation { + s.Owner = &v + return s +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsAndOperator type AnalyticsAndOperator struct { _ struct{} `type:"structure"` @@ -5280,6 +6712,13 @@ func (s *AnalyticsS3BucketDestination) SetBucket(v string) *AnalyticsS3BucketDes return s } +func (s *AnalyticsS3BucketDestination) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetBucketAccountId sets the BucketAccountId field's value. func (s *AnalyticsS3BucketDestination) SetBucketAccountId(v string) *AnalyticsS3BucketDestination { s.BucketAccountId = &v @@ -5547,6 +6986,140 @@ func (s *CORSRule) SetMaxAgeSeconds(v int64) *CORSRule { return s } +// Describes how a CSV-formatted input object is formatted. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CSVInput +type CSVInput struct { + _ struct{} `type:"structure"` + + // Single character used to indicate a row should be ignored when present at + // the start of a row. + Comments *string `type:"string"` + + // Value used to separate individual fields in a record. + FieldDelimiter *string `type:"string"` + + // Describes the first line of input. Valid values: None, Ignore, Use. + FileHeaderInfo *string `type:"string" enum:"FileHeaderInfo"` + + // Value used for escaping where the field delimiter is part of the value. + QuoteCharacter *string `type:"string"` + + // Single character used for escaping the quote character inside an already + // escaped value. 
+ QuoteEscapeCharacter *string `type:"string"` + + // Value used to separate individual records. + RecordDelimiter *string `type:"string"` +} + +// String returns the string representation +func (s CSVInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CSVInput) GoString() string { + return s.String() +} + +// SetComments sets the Comments field's value. +func (s *CSVInput) SetComments(v string) *CSVInput { + s.Comments = &v + return s +} + +// SetFieldDelimiter sets the FieldDelimiter field's value. +func (s *CSVInput) SetFieldDelimiter(v string) *CSVInput { + s.FieldDelimiter = &v + return s +} + +// SetFileHeaderInfo sets the FileHeaderInfo field's value. +func (s *CSVInput) SetFileHeaderInfo(v string) *CSVInput { + s.FileHeaderInfo = &v + return s +} + +// SetQuoteCharacter sets the QuoteCharacter field's value. +func (s *CSVInput) SetQuoteCharacter(v string) *CSVInput { + s.QuoteCharacter = &v + return s +} + +// SetQuoteEscapeCharacter sets the QuoteEscapeCharacter field's value. +func (s *CSVInput) SetQuoteEscapeCharacter(v string) *CSVInput { + s.QuoteEscapeCharacter = &v + return s +} + +// SetRecordDelimiter sets the RecordDelimiter field's value. +func (s *CSVInput) SetRecordDelimiter(v string) *CSVInput { + s.RecordDelimiter = &v + return s +} + +// Describes how CSV-formatted results are formatted. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CSVOutput +type CSVOutput struct { + _ struct{} `type:"structure"` + + // Value used to separate individual fields in a record. + FieldDelimiter *string `type:"string"` + + // Value used for escaping where the field delimiter is part of the value. + QuoteCharacter *string `type:"string"` + + // Single character used for escaping the quote character inside an already + // escaped value. + QuoteEscapeCharacter *string `type:"string"` + + // Indicates whether or not all output fields should be quoted. + QuoteFields *string `type:"string" enum:"QuoteFields"` + + // Value used to separate individual records. + RecordDelimiter *string `type:"string"` +} + +// String returns the string representation +func (s CSVOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CSVOutput) GoString() string { + return s.String() +} + +// SetFieldDelimiter sets the FieldDelimiter field's value. +func (s *CSVOutput) SetFieldDelimiter(v string) *CSVOutput { + s.FieldDelimiter = &v + return s +} + +// SetQuoteCharacter sets the QuoteCharacter field's value. +func (s *CSVOutput) SetQuoteCharacter(v string) *CSVOutput { + s.QuoteCharacter = &v + return s +} + +// SetQuoteEscapeCharacter sets the QuoteEscapeCharacter field's value. +func (s *CSVOutput) SetQuoteEscapeCharacter(v string) *CSVOutput { + s.QuoteEscapeCharacter = &v + return s +} + +// SetQuoteFields sets the QuoteFields field's value. +func (s *CSVOutput) SetQuoteFields(v string) *CSVOutput { + s.QuoteFields = &v + return s +} + +// SetRecordDelimiter sets the RecordDelimiter field's value. 
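+//
+// Like the other setters it returns the receiver, so a CSVOutput can be
+// assembled by chaining; a sketch with illustrative delimiter values:
+//
+//    out := (&CSVOutput{}).SetFieldDelimiter(",").SetRecordDelimiter("\n")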
+func (s *CSVOutput) SetRecordDelimiter(v string) *CSVOutput { + s.RecordDelimiter = &v + return s +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CloudFunctionConfiguration type CloudFunctionConfiguration struct { _ struct{} `type:"structure"` @@ -5638,7 +7211,7 @@ type CompleteMultipartUploadInput struct { // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure"` + MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // Confirms that the requester knows that she or he will be charged for the // request. Bucket owners need not specify this parameter in their requests. @@ -5688,6 +7261,13 @@ func (s *CompleteMultipartUploadInput) SetBucket(v string) *CompleteMultipartUpl return s } +func (s *CompleteMultipartUploadInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetKey sets the Key field's value. func (s *CompleteMultipartUploadInput) SetKey(v string) *CompleteMultipartUploadInput { s.Key = &v @@ -5761,6 +7341,13 @@ func (s *CompleteMultipartUploadOutput) SetBucket(v string) *CompleteMultipartUp return s } +func (s *CompleteMultipartUploadOutput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetETag sets the ETag field's value. func (s *CompleteMultipartUploadOutput) SetETag(v string) *CompleteMultipartUploadOutput { s.ETag = &v @@ -6086,6 +7673,13 @@ func (s *CopyObjectInput) SetBucket(v string) *CopyObjectInput { return s } +func (s *CopyObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetCacheControl sets the CacheControl field's value. func (s *CopyObjectInput) SetCacheControl(v string) *CopyObjectInput { s.CacheControl = &v @@ -6158,6 +7752,13 @@ func (s *CopyObjectInput) SetCopySourceSSECustomerKey(v string) *CopyObjectInput return s } +func (s *CopyObjectInput) getCopySourceSSECustomerKey() (v string) { + if s.CopySourceSSECustomerKey == nil { + return v + } + return *s.CopySourceSSECustomerKey +} + // SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value. func (s *CopyObjectInput) SetCopySourceSSECustomerKeyMD5(v string) *CopyObjectInput { s.CopySourceSSECustomerKeyMD5 = &v @@ -6230,6 +7831,13 @@ func (s *CopyObjectInput) SetSSECustomerKey(v string) *CopyObjectInput { return s } +func (s *CopyObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + // SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. func (s *CopyObjectInput) SetSSECustomerKeyMD5(v string) *CopyObjectInput { s.SSECustomerKeyMD5 = &v @@ -6472,7 +8080,7 @@ type CreateBucketInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure"` + CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // Allows grantee the read, write, read ACP, and write ACP permissions on the // bucket. 
@@ -6526,6 +8134,13 @@ func (s *CreateBucketInput) SetBucket(v string) *CreateBucketInput { return s } +func (s *CreateBucketInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetCreateBucketConfiguration sets the CreateBucketConfiguration field's value. func (s *CreateBucketInput) SetCreateBucketConfiguration(v *CreateBucketConfiguration) *CreateBucketInput { s.CreateBucketConfiguration = v @@ -6667,6 +8282,9 @@ type CreateMultipartUploadInput struct { // The type of storage to use for the object. Defaults to 'STANDARD'. StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + // The tag-set for the object. The tag-set must be encoded as URL Query parameters + Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` + // If the bucket is configured as a website, redirects requests for this object // to another object in the same bucket or to an external URL. Amazon S3 stores // the value of this header in the object metadata. @@ -6714,6 +8332,13 @@ func (s *CreateMultipartUploadInput) SetBucket(v string) *CreateMultipartUploadI return s } +func (s *CreateMultipartUploadInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetCacheControl sets the CacheControl field's value. func (s *CreateMultipartUploadInput) SetCacheControl(v string) *CreateMultipartUploadInput { s.CacheControl = &v @@ -6804,6 +8429,13 @@ func (s *CreateMultipartUploadInput) SetSSECustomerKey(v string) *CreateMultipar return s } +func (s *CreateMultipartUploadInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + // SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. func (s *CreateMultipartUploadInput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadInput { s.SSECustomerKeyMD5 = &v @@ -6828,6 +8460,12 @@ func (s *CreateMultipartUploadInput) SetStorageClass(v string) *CreateMultipartU return s } +// SetTagging sets the Tagging field's value. +func (s *CreateMultipartUploadInput) SetTagging(v string) *CreateMultipartUploadInput { + s.Tagging = &v + return s +} + // SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. func (s *CreateMultipartUploadInput) SetWebsiteRedirectLocation(v string) *CreateMultipartUploadInput { s.WebsiteRedirectLocation = &v @@ -6905,6 +8543,13 @@ func (s *CreateMultipartUploadOutput) SetBucket(v string) *CreateMultipartUpload return s } +func (s *CreateMultipartUploadOutput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetKey sets the Key field's value. func (s *CreateMultipartUploadOutput) SetKey(v string) *CreateMultipartUploadOutput { s.Key = &v @@ -7051,6 +8696,13 @@ func (s *DeleteBucketAnalyticsConfigurationInput) SetBucket(v string) *DeleteBuc return s } +func (s *DeleteBucketAnalyticsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetId sets the Id field's value. 
func (s *DeleteBucketAnalyticsConfigurationInput) SetId(v string) *DeleteBucketAnalyticsConfigurationInput { s.Id = &v @@ -7109,6 +8761,13 @@ func (s *DeleteBucketCorsInput) SetBucket(v string) *DeleteBucketCorsInput { return s } +func (s *DeleteBucketCorsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCorsOutput type DeleteBucketCorsOutput struct { _ struct{} `type:"structure"` @@ -7124,6 +8783,68 @@ func (s DeleteBucketCorsOutput) GoString() string { return s.String() } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryptionRequest +type DeleteBucketEncryptionInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket containing the server-side encryption configuration + // to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketEncryptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketEncryptionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketEncryptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketEncryptionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketEncryptionInput) SetBucket(v string) *DeleteBucketEncryptionInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketEncryptionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryptionOutput +type DeleteBucketEncryptionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketEncryptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketEncryptionOutput) GoString() string { + return s.String() +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketRequest type DeleteBucketInput struct { _ struct{} `type:"structure"` @@ -7161,6 +8882,13 @@ func (s *DeleteBucketInput) SetBucket(v string) *DeleteBucketInput { return s } +func (s *DeleteBucketInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfigurationRequest type DeleteBucketInventoryConfigurationInput struct { _ struct{} `type:"structure"` @@ -7208,6 +8936,13 @@ func (s *DeleteBucketInventoryConfigurationInput) SetBucket(v string) *DeleteBuc return s } +func (s *DeleteBucketInventoryConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetId sets the Id field's value. 
func (s *DeleteBucketInventoryConfigurationInput) SetId(v string) *DeleteBucketInventoryConfigurationInput { s.Id = &v @@ -7266,6 +9001,13 @@ func (s *DeleteBucketLifecycleInput) SetBucket(v string) *DeleteBucketLifecycleI return s } +func (s *DeleteBucketLifecycleInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycleOutput type DeleteBucketLifecycleOutput struct { _ struct{} `type:"structure"` @@ -7328,6 +9070,13 @@ func (s *DeleteBucketMetricsConfigurationInput) SetBucket(v string) *DeleteBucke return s } +func (s *DeleteBucketMetricsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetId sets the Id field's value. func (s *DeleteBucketMetricsConfigurationInput) SetId(v string) *DeleteBucketMetricsConfigurationInput { s.Id = &v @@ -7401,6 +9150,13 @@ func (s *DeleteBucketPolicyInput) SetBucket(v string) *DeleteBucketPolicyInput { return s } +func (s *DeleteBucketPolicyInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicyOutput type DeleteBucketPolicyOutput struct { _ struct{} `type:"structure"` @@ -7453,6 +9209,13 @@ func (s *DeleteBucketReplicationInput) SetBucket(v string) *DeleteBucketReplicat return s } +func (s *DeleteBucketReplicationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplicationOutput type DeleteBucketReplicationOutput struct { _ struct{} `type:"structure"` @@ -7505,6 +9268,13 @@ func (s *DeleteBucketTaggingInput) SetBucket(v string) *DeleteBucketTaggingInput return s } +func (s *DeleteBucketTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTaggingOutput type DeleteBucketTaggingOutput struct { _ struct{} `type:"structure"` @@ -7557,6 +9327,13 @@ func (s *DeleteBucketWebsiteInput) SetBucket(v string) *DeleteBucketWebsiteInput return s } +func (s *DeleteBucketWebsiteInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsiteOutput type DeleteBucketWebsiteOutput struct { _ struct{} `type:"structure"` @@ -7691,6 +9468,13 @@ func (s *DeleteObjectInput) SetBucket(v string) *DeleteObjectInput { return s } +func (s *DeleteObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetKey sets the Key field's value. func (s *DeleteObjectInput) SetKey(v string) *DeleteObjectInput { s.Key = &v @@ -7809,6 +9593,13 @@ func (s *DeleteObjectTaggingInput) SetBucket(v string) *DeleteObjectTaggingInput return s } +func (s *DeleteObjectTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetKey sets the Key field's value. 
func (s *DeleteObjectTaggingInput) SetKey(v string) *DeleteObjectTaggingInput { s.Key = &v @@ -7853,7 +9644,7 @@ type DeleteObjectsInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // Delete is a required field - Delete *Delete `locationName:"Delete" type:"structure" required:"true"` + Delete *Delete `locationName:"Delete" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // The concatenation of the authentication device's serial number, a space, // and the value that is displayed on your authentication device. @@ -7903,6 +9694,13 @@ func (s *DeleteObjectsInput) SetBucket(v string) *DeleteObjectsInput { return s } +func (s *DeleteObjectsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetDelete sets the Delete field's value. func (s *DeleteObjectsInput) SetDelete(v *Delete) *DeleteObjectsInput { s.Delete = v @@ -8009,16 +9807,27 @@ func (s *DeletedObject) SetVersionId(v string) *DeletedObject { return s } +// Container for replication destination information. // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Destination type Destination struct { _ struct{} `type:"structure"` + // Container for information regarding the access control for replicas. + AccessControlTranslation *AccessControlTranslation `type:"structure"` + + // Account ID of the destination bucket. Currently this is only being verified + // if Access Control Translation is enabled + Account *string `type:"string"` + // Amazon resource name (ARN) of the bucket where you want Amazon S3 to store // replicas of the object identified by the rule. // // Bucket is a required field Bucket *string `type:"string" required:"true"` + // Container for information regarding encryption based configuration for replicas. + EncryptionConfiguration *EncryptionConfiguration `type:"structure"` + // The class of storage used to store the object. StorageClass *string `type:"string" enum:"StorageClass"` } @@ -8039,6 +9848,11 @@ func (s *Destination) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.AccessControlTranslation != nil { + if err := s.AccessControlTranslation.Validate(); err != nil { + invalidParams.AddNested("AccessControlTranslation", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -8046,18 +9860,130 @@ func (s *Destination) Validate() error { return nil } +// SetAccessControlTranslation sets the AccessControlTranslation field's value. +func (s *Destination) SetAccessControlTranslation(v *AccessControlTranslation) *Destination { + s.AccessControlTranslation = v + return s +} + +// SetAccount sets the Account field's value. +func (s *Destination) SetAccount(v string) *Destination { + s.Account = &v + return s +} + // SetBucket sets the Bucket field's value. func (s *Destination) SetBucket(v string) *Destination { s.Bucket = &v return s } +func (s *Destination) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetEncryptionConfiguration sets the EncryptionConfiguration field's value. +func (s *Destination) SetEncryptionConfiguration(v *EncryptionConfiguration) *Destination { + s.EncryptionConfiguration = v + return s +} + // SetStorageClass sets the StorageClass field's value. 
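+//
+// With the new fields above, a replication Destination can be assembled by
+// chaining the setters; a sketch in which the bucket ARN, account ID, and
+// owner-override value are illustrative placeholders:
+//
+//    dst := (&Destination{}).
+//        SetBucket("arn:aws:s3:::destination-bucket").
+//        SetAccount("111122223333").
+//        SetAccessControlTranslation((&AccessControlTranslation{}).SetOwner("Destination")).
+//        SetStorageClass("STANDARD")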
func (s *Destination) SetStorageClass(v string) *Destination { s.StorageClass = &v return s } +// Describes the server-side encryption that will be applied to the restore +// results. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Encryption +type Encryption struct { + _ struct{} `type:"structure"` + + // The server-side encryption algorithm used when storing job results in Amazon + // S3 (e.g., AES256, aws:kms). + // + // EncryptionType is a required field + EncryptionType *string `type:"string" required:"true" enum:"ServerSideEncryption"` + + // If the encryption type is aws:kms, this optional value can be used to specify + // the encryption context for the restore results. + KMSContext *string `type:"string"` + + // If the encryption type is aws:kms, this optional value specifies the AWS + // KMS key ID to use for encryption of job results. + KMSKeyId *string `type:"string"` +} + +// String returns the string representation +func (s Encryption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Encryption) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Encryption) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Encryption"} + if s.EncryptionType == nil { + invalidParams.Add(request.NewErrParamRequired("EncryptionType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEncryptionType sets the EncryptionType field's value. +func (s *Encryption) SetEncryptionType(v string) *Encryption { + s.EncryptionType = &v + return s +} + +// SetKMSContext sets the KMSContext field's value. +func (s *Encryption) SetKMSContext(v string) *Encryption { + s.KMSContext = &v + return s +} + +// SetKMSKeyId sets the KMSKeyId field's value. +func (s *Encryption) SetKMSKeyId(v string) *Encryption { + s.KMSKeyId = &v + return s +} + +// Container for information regarding encryption based configuration for replicas. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/EncryptionConfiguration +type EncryptionConfiguration struct { + _ struct{} `type:"structure"` + + // The id of the KMS key used to encrypt the replica object. + ReplicaKmsKeyID *string `type:"string"` +} + +// String returns the string representation +func (s EncryptionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EncryptionConfiguration) GoString() string { + return s.String() +} + +// SetReplicaKmsKeyID sets the ReplicaKmsKeyID field's value. 
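+//
+// A sketch of attaching an EncryptionConfiguration to a replication
+// Destination (the KMS key ARN is an illustrative placeholder):
+//
+//    dst.SetEncryptionConfiguration((&EncryptionConfiguration{}).
+//        SetReplicaKmsKeyID("arn:aws:kms:us-west-2:111122223333:key/example"))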
+func (s *EncryptionConfiguration) SetReplicaKmsKeyID(v string) *EncryptionConfiguration { + s.ReplicaKmsKeyID = &v + return s +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Error type Error struct { _ struct{} `type:"structure"` @@ -8222,6 +10148,13 @@ func (s *GetBucketAccelerateConfigurationInput) SetBucket(v string) *GetBucketAc return s } +func (s *GetBucketAccelerateConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfigurationOutput type GetBucketAccelerateConfigurationOutput struct { _ struct{} `type:"structure"` @@ -8283,6 +10216,13 @@ func (s *GetBucketAclInput) SetBucket(v string) *GetBucketAclInput { return s } +func (s *GetBucketAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAclOutput type GetBucketAclOutput struct { _ struct{} `type:"structure"` @@ -8362,6 +10302,13 @@ func (s *GetBucketAnalyticsConfigurationInput) SetBucket(v string) *GetBucketAna return s } +func (s *GetBucketAnalyticsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetId sets the Id field's value. func (s *GetBucketAnalyticsConfigurationInput) SetId(v string) *GetBucketAnalyticsConfigurationInput { s.Id = &v @@ -8429,6 +10376,13 @@ func (s *GetBucketCorsInput) SetBucket(v string) *GetBucketCorsInput { return s } +func (s *GetBucketCorsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCorsOutput type GetBucketCorsOutput struct { _ struct{} `type:"structure"` @@ -8452,6 +10406,78 @@ func (s *GetBucketCorsOutput) SetCORSRules(v []*CORSRule) *GetBucketCorsOutput { return s } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryptionRequest +type GetBucketEncryptionInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket from which the server-side encryption configuration + // is retrieved. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketEncryptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketEncryptionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketEncryptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketEncryptionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
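+//
+// A minimal sketch of reading the configuration back, assuming an existing
+// client "svc" and a placeholder bucket name:
+//
+//    out, err := svc.GetBucketEncryption(&GetBucketEncryptionInput{
+//        Bucket: aws.String("mybucket"),
+//    })
+//    if err == nil {
+//        fmt.Println(out.ServerSideEncryptionConfiguration)
+//    }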
+func (s *GetBucketEncryptionInput) SetBucket(v string) *GetBucketEncryptionInput { + s.Bucket = &v + return s +} + +func (s *GetBucketEncryptionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryptionOutput +type GetBucketEncryptionOutput struct { + _ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"` + + // Container for server-side encryption configuration rules. Currently S3 supports + // one rule only. + ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketEncryptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketEncryptionOutput) GoString() string { + return s.String() +} + +// SetServerSideEncryptionConfiguration sets the ServerSideEncryptionConfiguration field's value. +func (s *GetBucketEncryptionOutput) SetServerSideEncryptionConfiguration(v *ServerSideEncryptionConfiguration) *GetBucketEncryptionOutput { + s.ServerSideEncryptionConfiguration = v + return s +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfigurationRequest type GetBucketInventoryConfigurationInput struct { _ struct{} `type:"structure"` @@ -8499,6 +10525,13 @@ func (s *GetBucketInventoryConfigurationInput) SetBucket(v string) *GetBucketInv return s } +func (s *GetBucketInventoryConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetId sets the Id field's value. func (s *GetBucketInventoryConfigurationInput) SetId(v string) *GetBucketInventoryConfigurationInput { s.Id = &v @@ -8566,6 +10599,13 @@ func (s *GetBucketLifecycleConfigurationInput) SetBucket(v string) *GetBucketLif return s } +func (s *GetBucketLifecycleConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfigurationOutput type GetBucketLifecycleConfigurationOutput struct { _ struct{} `type:"structure"` @@ -8626,6 +10666,13 @@ func (s *GetBucketLifecycleInput) SetBucket(v string) *GetBucketLifecycleInput { return s } +func (s *GetBucketLifecycleInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleOutput type GetBucketLifecycleOutput struct { _ struct{} `type:"structure"` @@ -8686,6 +10733,13 @@ func (s *GetBucketLocationInput) SetBucket(v string) *GetBucketLocationInput { return s } +func (s *GetBucketLocationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocationOutput type GetBucketLocationOutput struct { _ struct{} `type:"structure"` @@ -8746,6 +10800,13 @@ func (s *GetBucketLoggingInput) SetBucket(v string) *GetBucketLoggingInput { return s } +func (s *GetBucketLoggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLoggingOutput type GetBucketLoggingOutput struct { _ struct{} `type:"structure"` @@ -8816,6 +10877,13 @@ func (s *GetBucketMetricsConfigurationInput) SetBucket(v string) 
*GetBucketMetri return s } +func (s *GetBucketMetricsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetId sets the Id field's value. func (s *GetBucketMetricsConfigurationInput) SetId(v string) *GetBucketMetricsConfigurationInput { s.Id = &v @@ -8885,6 +10953,13 @@ func (s *GetBucketNotificationConfigurationRequest) SetBucket(v string) *GetBuck return s } +func (s *GetBucketNotificationConfigurationRequest) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyRequest type GetBucketPolicyInput struct { _ struct{} `type:"structure"` @@ -8922,6 +10997,13 @@ func (s *GetBucketPolicyInput) SetBucket(v string) *GetBucketPolicyInput { return s } +func (s *GetBucketPolicyInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyOutput type GetBucketPolicyOutput struct { _ struct{} `type:"structure" payload:"Policy"` @@ -8983,6 +11065,13 @@ func (s *GetBucketReplicationInput) SetBucket(v string) *GetBucketReplicationInp return s } +func (s *GetBucketReplicationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplicationOutput type GetBucketReplicationOutput struct { _ struct{} `type:"structure" payload:"ReplicationConfiguration"` @@ -9045,6 +11134,13 @@ func (s *GetBucketRequestPaymentInput) SetBucket(v string) *GetBucketRequestPaym return s } +func (s *GetBucketRequestPaymentInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPaymentOutput type GetBucketRequestPaymentOutput struct { _ struct{} `type:"structure"` @@ -9106,6 +11202,13 @@ func (s *GetBucketTaggingInput) SetBucket(v string) *GetBucketTaggingInput { return s } +func (s *GetBucketTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTaggingOutput type GetBucketTaggingOutput struct { _ struct{} `type:"structure"` @@ -9167,6 +11270,13 @@ func (s *GetBucketVersioningInput) SetBucket(v string) *GetBucketVersioningInput return s } +func (s *GetBucketVersioningInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioningOutput type GetBucketVersioningOutput struct { _ struct{} `type:"structure"` @@ -9239,6 +11349,13 @@ func (s *GetBucketWebsiteInput) SetBucket(v string) *GetBucketWebsiteInput { return s } +func (s *GetBucketWebsiteInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsiteOutput type GetBucketWebsiteOutput struct { _ struct{} `type:"structure"` @@ -9341,6 +11458,13 @@ func (s *GetObjectAclInput) SetBucket(v string) *GetObjectAclInput { return s } +func (s *GetObjectAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetKey sets the Key field's value. 
func (s *GetObjectAclInput) SetKey(v string) *GetObjectAclInput { s.Key = &v @@ -9514,6 +11638,13 @@ func (s *GetObjectInput) SetBucket(v string) *GetObjectInput { return s } +func (s *GetObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetIfMatch sets the IfMatch field's value. func (s *GetObjectInput) SetIfMatch(v string) *GetObjectInput { s.IfMatch = &v @@ -9610,6 +11741,13 @@ func (s *GetObjectInput) SetSSECustomerKey(v string) *GetObjectInput { return s } +func (s *GetObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + // SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. func (s *GetObjectInput) SetSSECustomerKeyMD5(v string) *GetObjectInput { s.SSECustomerKeyMD5 = &v @@ -9954,6 +12092,13 @@ func (s *GetObjectTaggingInput) SetBucket(v string) *GetObjectTaggingInput { return s } +func (s *GetObjectTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetKey sets the Key field's value. func (s *GetObjectTaggingInput) SetKey(v string) *GetObjectTaggingInput { s.Key = &v @@ -10050,6 +12195,13 @@ func (s *GetObjectTorrentInput) SetBucket(v string) *GetObjectTorrentInput { return s } +func (s *GetObjectTorrentInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetKey sets the Key field's value. func (s *GetObjectTorrentInput) SetKey(v string) *GetObjectTorrentInput { s.Key = &v @@ -10138,7 +12290,7 @@ func (s *GlacierJobParameters) SetTier(v string) *GlacierJobParameters { type Grant struct { _ struct{} `type:"structure"` - Grantee *Grantee `type:"structure"` + Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` // Specifies the permission given to the grantee. Permission *string `type:"string" enum:"Permission"` @@ -10293,6 +12445,13 @@ func (s *HeadBucketInput) SetBucket(v string) *HeadBucketInput { return s } +func (s *HeadBucketInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucketOutput type HeadBucketOutput struct { _ struct{} `type:"structure"` @@ -10404,6 +12563,13 @@ func (s *HeadObjectInput) SetBucket(v string) *HeadObjectInput { return s } +func (s *HeadObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetIfMatch sets the IfMatch field's value. func (s *HeadObjectInput) SetIfMatch(v string) *HeadObjectInput { s.IfMatch = &v @@ -10464,6 +12630,13 @@ func (s *HeadObjectInput) SetSSECustomerKey(v string) *HeadObjectInput { return s } +func (s *HeadObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + // SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. func (s *HeadObjectInput) SetSSECustomerKeyMD5(v string) *HeadObjectInput { s.SSECustomerKeyMD5 = &v @@ -10809,6 +12982,31 @@ func (s *Initiator) SetID(v string) *Initiator { return s } +// Describes the serialization format of the object. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InputSerialization +type InputSerialization struct { + _ struct{} `type:"structure"` + + // Describes the serialization of a CSV-encoded object. 
+ CSV *CSVInput `type:"structure"` +} + +// String returns the string representation +func (s InputSerialization) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InputSerialization) GoString() string { + return s.String() +} + +// SetCSV sets the CSV field's value. +func (s *InputSerialization) SetCSV(v *CSVInput) *InputSerialization { + s.CSV = v + return s +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryConfiguration type InventoryConfiguration struct { _ struct{} `type:"structure"` @@ -10983,6 +13181,56 @@ func (s *InventoryDestination) SetS3BucketDestination(v *InventoryS3BucketDestin return s } +// Contains the type of server-side encryption used to encrypt the inventory +// results. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryEncryption +type InventoryEncryption struct { + _ struct{} `type:"structure"` + + // Specifies the use of SSE-KMS to encrypt delievered Inventory reports. + SSEKMS *SSEKMS `locationName:"SSE-KMS" type:"structure"` + + // Specifies the use of SSE-S3 to encrypt delievered Inventory reports. + SSES3 *SSES3 `locationName:"SSE-S3" type:"structure"` +} + +// String returns the string representation +func (s InventoryEncryption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryEncryption) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryEncryption) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryEncryption"} + if s.SSEKMS != nil { + if err := s.SSEKMS.Validate(); err != nil { + invalidParams.AddNested("SSEKMS", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSSEKMS sets the SSEKMS field's value. +func (s *InventoryEncryption) SetSSEKMS(v *SSEKMS) *InventoryEncryption { + s.SSEKMS = v + return s +} + +// SetSSES3 sets the SSES3 field's value. +func (s *InventoryEncryption) SetSSES3(v *SSES3) *InventoryEncryption { + s.SSES3 = v + return s +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryFilter type InventoryFilter struct { _ struct{} `type:"structure"` @@ -11035,6 +13283,10 @@ type InventoryS3BucketDestination struct { // Bucket is a required field Bucket *string `type:"string" required:"true"` + // Contains the type of server-side encryption used to encrypt the inventory + // results. + Encryption *InventoryEncryption `type:"structure"` + // Specifies the output format of the inventory results. // // Format is a required field @@ -11063,6 +13315,11 @@ func (s *InventoryS3BucketDestination) Validate() error { if s.Format == nil { invalidParams.Add(request.NewErrParamRequired("Format")) } + if s.Encryption != nil { + if err := s.Encryption.Validate(); err != nil { + invalidParams.AddNested("Encryption", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -11082,6 +13339,19 @@ func (s *InventoryS3BucketDestination) SetBucket(v string) *InventoryS3BucketDes return s } +func (s *InventoryS3BucketDestination) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetEncryption sets the Encryption field's value. 
+func (s *InventoryS3BucketDestination) SetEncryption(v *InventoryEncryption) *InventoryS3BucketDestination { + s.Encryption = v + return s +} + // SetFormat sets the Format field's value. func (s *InventoryS3BucketDestination) SetFormat(v string) *InventoryS3BucketDestination { s.Format = &v @@ -11612,6 +13882,13 @@ func (s *ListBucketAnalyticsConfigurationsInput) SetBucket(v string) *ListBucket return s } +func (s *ListBucketAnalyticsConfigurationsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetContinuationToken sets the ContinuationToken field's value. func (s *ListBucketAnalyticsConfigurationsInput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsInput { s.ContinuationToken = &v @@ -11718,6 +13995,13 @@ func (s *ListBucketInventoryConfigurationsInput) SetBucket(v string) *ListBucket return s } +func (s *ListBucketInventoryConfigurationsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetContinuationToken sets the ContinuationToken field's value. func (s *ListBucketInventoryConfigurationsInput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsInput { s.ContinuationToken = &v @@ -11824,6 +14108,13 @@ func (s *ListBucketMetricsConfigurationsInput) SetBucket(v string) *ListBucketMe return s } +func (s *ListBucketMetricsConfigurationsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetContinuationToken sets the ContinuationToken field's value. func (s *ListBucketMetricsConfigurationsInput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsInput { s.ContinuationToken = &v @@ -11999,6 +14290,13 @@ func (s *ListMultipartUploadsInput) SetBucket(v string) *ListMultipartUploadsInp return s } +func (s *ListMultipartUploadsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetDelimiter sets the Delimiter field's value. func (s *ListMultipartUploadsInput) SetDelimiter(v string) *ListMultipartUploadsInput { s.Delimiter = &v @@ -12096,6 +14394,13 @@ func (s *ListMultipartUploadsOutput) SetBucket(v string) *ListMultipartUploadsOu return s } +func (s *ListMultipartUploadsOutput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetCommonPrefixes sets the CommonPrefixes field's value. func (s *ListMultipartUploadsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListMultipartUploadsOutput { s.CommonPrefixes = v @@ -12223,6 +14528,13 @@ func (s *ListObjectVersionsInput) SetBucket(v string) *ListObjectVersionsInput { return s } +func (s *ListObjectVersionsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetDelimiter sets the Delimiter field's value. func (s *ListObjectVersionsInput) SetDelimiter(v string) *ListObjectVersionsInput { s.Delimiter = &v @@ -12450,6 +14762,13 @@ func (s *ListObjectsInput) SetBucket(v string) *ListObjectsInput { return s } +func (s *ListObjectsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetDelimiter sets the Delimiter field's value. func (s *ListObjectsInput) SetDelimiter(v string) *ListObjectsInput { s.Delimiter = &v @@ -12662,6 +14981,13 @@ func (s *ListObjectsV2Input) SetBucket(v string) *ListObjectsV2Input { return s } +func (s *ListObjectsV2Input) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetContinuationToken sets the ContinuationToken field's value. 
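`InventoryEncryption` and its `SSEKMS`/`SSES3` members (defined further down in this patch) plug into `InventoryS3BucketDestination` through the new `Encryption` field. A sketch of requesting SSE-KMS-encrypted inventory reports, assuming this patched SDK is the one vendored; the bucket and key ARNs are placeholders:

```
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	dest := &s3.InventoryS3BucketDestination{}
	dest.SetBucket("arn:aws:s3:::example-inventory-bucket")
	dest.SetFormat(s3.InventoryFormatCsv)
	// Encrypt delivered reports with a KMS key; setting SSES3 instead
	// would request S3-managed keys.
	dest.SetEncryption(&s3.InventoryEncryption{
		SSEKMS: &s3.SSEKMS{
			KeyId: aws.String("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE"),
		},
	})
	if err := dest.Validate(); err != nil {
		fmt.Println("invalid destination:", err)
	}
}
```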
func (s *ListObjectsV2Input) SetContinuationToken(v string) *ListObjectsV2Input { s.ContinuationToken = &v @@ -12911,6 +15237,13 @@ func (s *ListPartsInput) SetBucket(v string) *ListPartsInput { return s } +func (s *ListPartsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetKey sets the Key field's value. func (s *ListPartsInput) SetKey(v string) *ListPartsInput { s.Key = &v @@ -13018,6 +15351,13 @@ func (s *ListPartsOutput) SetBucket(v string) *ListPartsOutput { return s } +func (s *ListPartsOutput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetInitiator sets the Initiator field's value. func (s *ListPartsOutput) SetInitiator(v *Initiator) *ListPartsOutput { s.Initiator = v @@ -13084,6 +15424,135 @@ func (s *ListPartsOutput) SetUploadId(v string) *ListPartsOutput { return s } +// Describes an S3 location that will receive the results of the restore request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/S3Location +type Location struct { + _ struct{} `type:"structure"` + + // A list of grants that control access to the staged results. + AccessControlList []*Grant `locationNameList:"Grant" type:"list"` + + // The name of the bucket where the restore results will be placed. + // + // BucketName is a required field + BucketName *string `type:"string" required:"true"` + + // The canned ACL to apply to the restore results. + CannedACL *string `type:"string" enum:"ObjectCannedACL"` + + // Describes the server-side encryption that will be applied to the restore + // results. + Encryption *Encryption `type:"structure"` + + // The prefix that is prepended to the restore results for this request. + // + // Prefix is a required field + Prefix *string `type:"string" required:"true"` + + // The class of storage used to store the restore results. + StorageClass *string `type:"string" enum:"StorageClass"` + + // The tag-set that is applied to the restore results. + Tagging *Tagging `type:"structure"` + + // A list of metadata to store with the restore results in S3. + UserMetadata []*MetadataEntry `locationNameList:"MetadataEntry" type:"list"` +} + +// String returns the string representation +func (s Location) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Location) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Location) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Location"} + if s.BucketName == nil { + invalidParams.Add(request.NewErrParamRequired("BucketName")) + } + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + if s.AccessControlList != nil { + for i, v := range s.AccessControlList { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AccessControlList", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Encryption != nil { + if err := s.Encryption.Validate(); err != nil { + invalidParams.AddNested("Encryption", err.(request.ErrInvalidParams)) + } + } + if s.Tagging != nil { + if err := s.Tagging.Validate(); err != nil { + invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessControlList sets the AccessControlList field's value. 
+func (s *Location) SetAccessControlList(v []*Grant) *Location { + s.AccessControlList = v + return s +} + +// SetBucketName sets the BucketName field's value. +func (s *Location) SetBucketName(v string) *Location { + s.BucketName = &v + return s +} + +// SetCannedACL sets the CannedACL field's value. +func (s *Location) SetCannedACL(v string) *Location { + s.CannedACL = &v + return s +} + +// SetEncryption sets the Encryption field's value. +func (s *Location) SetEncryption(v *Encryption) *Location { + s.Encryption = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *Location) SetPrefix(v string) *Location { + s.Prefix = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *Location) SetStorageClass(v string) *Location { + s.StorageClass = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *Location) SetTagging(v *Tagging) *Location { + s.Tagging = v + return s +} + +// SetUserMetadata sets the UserMetadata field's value. +func (s *Location) SetUserMetadata(v []*MetadataEntry) *Location { + s.UserMetadata = v + return s +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LoggingEnabled type LoggingEnabled struct { _ struct{} `type:"structure"` @@ -13151,6 +15620,38 @@ func (s *LoggingEnabled) SetTargetPrefix(v string) *LoggingEnabled { return s } +// A metadata key-value pair to store with an object. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetadataEntry +type MetadataEntry struct { + _ struct{} `type:"structure"` + + Name *string `type:"string"` + + Value *string `type:"string"` +} + +// String returns the string representation +func (s MetadataEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetadataEntry) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *MetadataEntry) SetName(v string) *MetadataEntry { + s.Name = &v + return s +} + +// SetValue sets the Value field's value. +func (s *MetadataEntry) SetValue(v string) *MetadataEntry { + s.Value = &v + return s +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsAndOperator type MetricsAndOperator struct { _ struct{} `type:"structure"` @@ -13811,6 +16312,71 @@ func (s *ObjectVersion) SetVersionId(v string) *ObjectVersion { return s } +// Describes the location where the restore job's output is stored. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/OutputLocation +type OutputLocation struct { + _ struct{} `type:"structure"` + + // Describes an S3 location that will receive the results of the restore request. + S3 *Location `type:"structure"` +} + +// String returns the string representation +func (s OutputLocation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OutputLocation) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OutputLocation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OutputLocation"} + if s.S3 != nil { + if err := s.S3.Validate(); err != nil { + invalidParams.AddNested("S3", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3 sets the S3 field's value. 
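`Location`, `MetadataEntry`, and `OutputLocation` above describe where restore output lands. A sketch of assembling one; the bucket name and prefix are placeholders:

```
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	loc := &s3.Location{}
	loc.SetBucketName("example-results-bucket")
	loc.SetPrefix("restored/")
	loc.SetStorageClass(s3.StorageClassStandard)
	loc.SetUserMetadata([]*s3.MetadataEntry{
		(&s3.MetadataEntry{}).SetName("job").SetValue("nightly-restore"),
	})

	out := (&s3.OutputLocation{}).SetS3(loc)
	// Validate catches the two required fields, BucketName and Prefix.
	if err := out.Validate(); err != nil {
		fmt.Println("invalid output location:", err)
	}
}
```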
+func (s *OutputLocation) SetS3(v *Location) *OutputLocation { + s.S3 = v + return s +} + +// Describes how results of the Select job are serialized. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/OutputSerialization +type OutputSerialization struct { + _ struct{} `type:"structure"` + + // Describes the serialization of CSV-encoded Select results. + CSV *CSVOutput `type:"structure"` +} + +// String returns the string representation +func (s OutputSerialization) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OutputSerialization) GoString() string { + return s.String() +} + +// SetCSV sets the CSV field's value. +func (s *OutputSerialization) SetCSV(v *CSVOutput) *OutputSerialization { + s.CSV = v + return s +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Owner type Owner struct { _ struct{} `type:"structure"` @@ -13901,7 +16467,7 @@ type PutBucketAccelerateConfigurationInput struct { // Specifies the Accelerate Configuration you want to set for the bucket. // // AccelerateConfiguration is a required field - AccelerateConfiguration *AccelerateConfiguration `locationName:"AccelerateConfiguration" type:"structure" required:"true"` + AccelerateConfiguration *AccelerateConfiguration `locationName:"AccelerateConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // Name of the bucket for which the accelerate configuration is set. // @@ -13947,6 +16513,13 @@ func (s *PutBucketAccelerateConfigurationInput) SetBucket(v string) *PutBucketAc return s } +func (s *PutBucketAccelerateConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfigurationOutput type PutBucketAccelerateConfigurationOutput struct { _ struct{} `type:"structure"` @@ -13969,7 +16542,7 @@ type PutBucketAclInput struct { // The canned ACL to apply to the bucket. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` - AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"` + AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -14037,6 +16610,13 @@ func (s *PutBucketAclInput) SetBucket(v string) *PutBucketAclInput { return s } +func (s *PutBucketAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetGrantFullControl sets the GrantFullControl field's value. func (s *PutBucketAclInput) SetGrantFullControl(v string) *PutBucketAclInput { s.GrantFullControl = &v @@ -14089,7 +16669,7 @@ type PutBucketAnalyticsConfigurationInput struct { // The configuration and any analyses for the analytics filter. // // AnalyticsConfiguration is a required field - AnalyticsConfiguration *AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"structure" required:"true"` + AnalyticsConfiguration *AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // The name of the bucket to which an analytics configuration is stored. 
// @@ -14148,6 +16728,13 @@ func (s *PutBucketAnalyticsConfigurationInput) SetBucket(v string) *PutBucketAna return s } +func (s *PutBucketAnalyticsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetId sets the Id field's value. func (s *PutBucketAnalyticsConfigurationInput) SetId(v string) *PutBucketAnalyticsConfigurationInput { s.Id = &v @@ -14177,7 +16764,7 @@ type PutBucketCorsInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // CORSConfiguration is a required field - CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true"` + CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation @@ -14217,6 +16804,13 @@ func (s *PutBucketCorsInput) SetBucket(v string) *PutBucketCorsInput { return s } +func (s *PutBucketCorsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetCORSConfiguration sets the CORSConfiguration field's value. func (s *PutBucketCorsInput) SetCORSConfiguration(v *CORSConfiguration) *PutBucketCorsInput { s.CORSConfiguration = v @@ -14238,6 +16832,88 @@ func (s PutBucketCorsOutput) GoString() string { return s.String() } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryptionRequest +type PutBucketEncryptionInput struct { + _ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"` + + // The name of the bucket for which the server-side encryption configuration + // is set. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Container for server-side encryption configuration rules. Currently S3 supports + // one rule only. + // + // ServerSideEncryptionConfiguration is a required field + ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `locationName:"ServerSideEncryptionConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketEncryptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketEncryptionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketEncryptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketEncryptionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.ServerSideEncryptionConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("ServerSideEncryptionConfiguration")) + } + if s.ServerSideEncryptionConfiguration != nil { + if err := s.ServerSideEncryptionConfiguration.Validate(); err != nil { + invalidParams.AddNested("ServerSideEncryptionConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *PutBucketEncryptionInput) SetBucket(v string) *PutBucketEncryptionInput { + s.Bucket = &v + return s +} + +func (s *PutBucketEncryptionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetServerSideEncryptionConfiguration sets the ServerSideEncryptionConfiguration field's value. +func (s *PutBucketEncryptionInput) SetServerSideEncryptionConfiguration(v *ServerSideEncryptionConfiguration) *PutBucketEncryptionInput { + s.ServerSideEncryptionConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryptionOutput +type PutBucketEncryptionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketEncryptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketEncryptionOutput) GoString() string { + return s.String() +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfigurationRequest type PutBucketInventoryConfigurationInput struct { _ struct{} `type:"structure" payload:"InventoryConfiguration"` @@ -14255,7 +16931,7 @@ type PutBucketInventoryConfigurationInput struct { // Specifies the inventory configuration. // // InventoryConfiguration is a required field - InventoryConfiguration *InventoryConfiguration `locationName:"InventoryConfiguration" type:"structure" required:"true"` + InventoryConfiguration *InventoryConfiguration `locationName:"InventoryConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation @@ -14298,6 +16974,13 @@ func (s *PutBucketInventoryConfigurationInput) SetBucket(v string) *PutBucketInv return s } +func (s *PutBucketInventoryConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetId sets the Id field's value. func (s *PutBucketInventoryConfigurationInput) SetId(v string) *PutBucketInventoryConfigurationInput { s.Id = &v @@ -14332,7 +17015,7 @@ type PutBucketLifecycleConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"` + LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation @@ -14369,6 +17052,13 @@ func (s *PutBucketLifecycleConfigurationInput) SetBucket(v string) *PutBucketLif return s } +func (s *PutBucketLifecycleConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetLifecycleConfiguration sets the LifecycleConfiguration field's value. 
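`PutBucketEncryptionInput` pairs with a `PutBucketEncryption` operation that this patch presumably adds earlier in the file; only the input and output types are visible in this excerpt, so the call below is an assumption from their names. The rule types it references appear further down. A sketch of enabling default SSE-KMS encryption on a bucket; the session wiring and key ARN are placeholders:

```
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	input := &s3.PutBucketEncryptionInput{}
	input.SetBucket("example-bucket")
	input.SetServerSideEncryptionConfiguration(&s3.ServerSideEncryptionConfiguration{
		Rules: []*s3.ServerSideEncryptionRule{{
			ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
				SSEAlgorithm:   aws.String(s3.ServerSideEncryptionAwsKms),
				KMSMasterKeyID: aws.String("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE"),
			},
		}},
	})

	// PutBucketEncryption is assumed to exist alongside these types.
	if _, err := svc.PutBucketEncryption(input); err != nil {
		fmt.Println("put bucket encryption failed:", err)
	}
}
```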
func (s *PutBucketLifecycleConfigurationInput) SetLifecycleConfiguration(v *BucketLifecycleConfiguration) *PutBucketLifecycleConfigurationInput { s.LifecycleConfiguration = v @@ -14397,7 +17087,7 @@ type PutBucketLifecycleInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"` + LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation @@ -14434,6 +17124,13 @@ func (s *PutBucketLifecycleInput) SetBucket(v string) *PutBucketLifecycleInput { return s } +func (s *PutBucketLifecycleInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetLifecycleConfiguration sets the LifecycleConfiguration field's value. func (s *PutBucketLifecycleInput) SetLifecycleConfiguration(v *LifecycleConfiguration) *PutBucketLifecycleInput { s.LifecycleConfiguration = v @@ -14463,7 +17160,7 @@ type PutBucketLoggingInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // BucketLoggingStatus is a required field - BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true"` + BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation @@ -14503,6 +17200,13 @@ func (s *PutBucketLoggingInput) SetBucket(v string) *PutBucketLoggingInput { return s } +func (s *PutBucketLoggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetBucketLoggingStatus sets the BucketLoggingStatus field's value. func (s *PutBucketLoggingInput) SetBucketLoggingStatus(v *BucketLoggingStatus) *PutBucketLoggingInput { s.BucketLoggingStatus = v @@ -14541,7 +17245,7 @@ type PutBucketMetricsConfigurationInput struct { // Specifies the metrics configuration. // // MetricsConfiguration is a required field - MetricsConfiguration *MetricsConfiguration `locationName:"MetricsConfiguration" type:"structure" required:"true"` + MetricsConfiguration *MetricsConfiguration `locationName:"MetricsConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation @@ -14584,6 +17288,13 @@ func (s *PutBucketMetricsConfigurationInput) SetBucket(v string) *PutBucketMetri return s } +func (s *PutBucketMetricsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetId sets the Id field's value. func (s *PutBucketMetricsConfigurationInput) SetId(v string) *PutBucketMetricsConfigurationInput { s.Id = &v @@ -14622,7 +17333,7 @@ type PutBucketNotificationConfigurationInput struct { // this element is empty, notifications are turned off on the bucket. 
// // NotificationConfiguration is a required field - NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true"` + NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation @@ -14662,6 +17373,13 @@ func (s *PutBucketNotificationConfigurationInput) SetBucket(v string) *PutBucket return s } +func (s *PutBucketNotificationConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetNotificationConfiguration sets the NotificationConfiguration field's value. func (s *PutBucketNotificationConfigurationInput) SetNotificationConfiguration(v *NotificationConfiguration) *PutBucketNotificationConfigurationInput { s.NotificationConfiguration = v @@ -14691,7 +17409,7 @@ type PutBucketNotificationInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // NotificationConfiguration is a required field - NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true"` + NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation @@ -14726,6 +17444,13 @@ func (s *PutBucketNotificationInput) SetBucket(v string) *PutBucketNotificationI return s } +func (s *PutBucketNotificationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetNotificationConfiguration sets the NotificationConfiguration field's value. func (s *PutBucketNotificationInput) SetNotificationConfiguration(v *NotificationConfigurationDeprecated) *PutBucketNotificationInput { s.NotificationConfiguration = v @@ -14754,6 +17479,10 @@ type PutBucketPolicyInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Set this parameter to true to confirm that you want to remove your permissions + // to change this bucket policy in the future. + ConfirmRemoveSelfBucketAccess *bool `location:"header" locationName:"x-amz-confirm-remove-self-bucket-access" type:"boolean"` + // The bucket policy as a JSON document. // // Policy is a required field @@ -14792,6 +17521,19 @@ func (s *PutBucketPolicyInput) SetBucket(v string) *PutBucketPolicyInput { return s } +func (s *PutBucketPolicyInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetConfirmRemoveSelfBucketAccess sets the ConfirmRemoveSelfBucketAccess field's value. +func (s *PutBucketPolicyInput) SetConfirmRemoveSelfBucketAccess(v bool) *PutBucketPolicyInput { + s.ConfirmRemoveSelfBucketAccess = &v + return s +} + // SetPolicy sets the Policy field's value. func (s *PutBucketPolicyInput) SetPolicy(v string) *PutBucketPolicyInput { s.Policy = &v @@ -14824,7 +17566,7 @@ type PutBucketReplicationInput struct { // replication configuration size can be up to 2 MB. 
// // ReplicationConfiguration is a required field - ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true"` + ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation @@ -14864,6 +17606,13 @@ func (s *PutBucketReplicationInput) SetBucket(v string) *PutBucketReplicationInp return s } +func (s *PutBucketReplicationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetReplicationConfiguration sets the ReplicationConfiguration field's value. func (s *PutBucketReplicationInput) SetReplicationConfiguration(v *ReplicationConfiguration) *PutBucketReplicationInput { s.ReplicationConfiguration = v @@ -14893,7 +17642,7 @@ type PutBucketRequestPaymentInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // RequestPaymentConfiguration is a required field - RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true"` + RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation @@ -14933,6 +17682,13 @@ func (s *PutBucketRequestPaymentInput) SetBucket(v string) *PutBucketRequestPaym return s } +func (s *PutBucketRequestPaymentInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetRequestPaymentConfiguration sets the RequestPaymentConfiguration field's value. func (s *PutBucketRequestPaymentInput) SetRequestPaymentConfiguration(v *RequestPaymentConfiguration) *PutBucketRequestPaymentInput { s.RequestPaymentConfiguration = v @@ -14962,7 +17718,7 @@ type PutBucketTaggingInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // Tagging is a required field - Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true"` + Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation @@ -15002,6 +17758,13 @@ func (s *PutBucketTaggingInput) SetBucket(v string) *PutBucketTaggingInput { return s } +func (s *PutBucketTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetTagging sets the Tagging field's value. func (s *PutBucketTaggingInput) SetTagging(v *Tagging) *PutBucketTaggingInput { s.Tagging = v @@ -15035,7 +17798,7 @@ type PutBucketVersioningInput struct { MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` // VersioningConfiguration is a required field - VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true"` + VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation @@ -15070,6 +17833,13 @@ func (s *PutBucketVersioningInput) SetBucket(v string) *PutBucketVersioningInput return s } +func (s *PutBucketVersioningInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetMFA sets the MFA field's value. 
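A few hunks up, `PutBucketPolicyInput` gains the `x-amz-confirm-remove-self-bucket-access` header. A sketch of setting it when deliberately applying a policy that removes the caller's own right to change the policy later; the policy document is a placeholder:

```
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	input := &s3.PutBucketPolicyInput{}
	input.SetBucket("example-bucket")
	input.SetPolicy(`{"Version":"2012-10-17","Statement":[]}`) // placeholder
	// Confirms the intent to give up future permission to change this policy.
	input.SetConfirmRemoveSelfBucketAccess(true)

	if _, err := svc.PutBucketPolicy(input); err != nil {
		fmt.Println("put bucket policy failed:", err)
	}
}
```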
func (s *PutBucketVersioningInput) SetMFA(v string) *PutBucketVersioningInput { s.MFA = &v @@ -15105,7 +17875,7 @@ type PutBucketWebsiteInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // WebsiteConfiguration is a required field - WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true"` + WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation @@ -15145,6 +17915,13 @@ func (s *PutBucketWebsiteInput) SetBucket(v string) *PutBucketWebsiteInput { return s } +func (s *PutBucketWebsiteInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetWebsiteConfiguration sets the WebsiteConfiguration field's value. func (s *PutBucketWebsiteInput) SetWebsiteConfiguration(v *WebsiteConfiguration) *PutBucketWebsiteInput { s.WebsiteConfiguration = v @@ -15173,7 +17950,7 @@ type PutObjectAclInput struct { // The canned ACL to apply to the object. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` - AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"` + AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -15259,6 +18036,13 @@ func (s *PutObjectAclInput) SetBucket(v string) *PutObjectAclInput { return s } +func (s *PutObjectAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetGrantFullControl sets the GrantFullControl field's value. func (s *PutObjectAclInput) SetGrantFullControl(v string) *PutObjectAclInput { s.GrantFullControl = &v @@ -15365,6 +18149,9 @@ type PutObjectInput struct { // body cannot be determined automatically. ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + // The base64-encoded 128-bit MD5 digest of the part data. + ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` + // A standard MIME type describing the format of the object data. ContentType *string `location:"header" locationName:"Content-Type" type:"string"` @@ -15481,6 +18268,13 @@ func (s *PutObjectInput) SetBucket(v string) *PutObjectInput { return s } +func (s *PutObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetCacheControl sets the CacheControl field's value. func (s *PutObjectInput) SetCacheControl(v string) *PutObjectInput { s.CacheControl = &v @@ -15511,6 +18305,12 @@ func (s *PutObjectInput) SetContentLength(v int64) *PutObjectInput { return s } +// SetContentMD5 sets the ContentMD5 field's value. +func (s *PutObjectInput) SetContentMD5(v string) *PutObjectInput { + s.ContentMD5 = &v + return s +} + // SetContentType sets the ContentType field's value. func (s *PutObjectInput) SetContentType(v string) *PutObjectInput { s.ContentType = &v @@ -15577,6 +18377,13 @@ func (s *PutObjectInput) SetSSECustomerKey(v string) *PutObjectInput { return s } +func (s *PutObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + // SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. 
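`PutObjectInput` above (and `UploadPartInput`, later in the file) now exposes the `Content-MD5` header directly. The value is the base64 encoding of the raw 16-byte MD5 sum of the body, which is plain standard-library work; a sketch:

```
package main

import (
	"bytes"
	"crypto/md5"
	"encoding/base64"
	"fmt"

	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	body := []byte("example object body")

	// Base64 of the raw digest bytes, not of their hex form.
	sum := md5.Sum(body)
	contentMD5 := base64.StdEncoding.EncodeToString(sum[:])

	input := &s3.PutObjectInput{}
	input.SetBucket("example-bucket")
	input.SetKey("example-key")
	input.SetBody(bytes.NewReader(body))
	input.SetContentMD5(contentMD5)

	fmt.Println("Content-MD5:", contentMD5)
}
```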
func (s *PutObjectInput) SetSSECustomerKeyMD5(v string) *PutObjectInput { s.SSECustomerKeyMD5 = &v @@ -15719,7 +18526,7 @@ type PutObjectTaggingInput struct { Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` // Tagging is a required field - Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true"` + Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } @@ -15767,6 +18574,13 @@ func (s *PutObjectTaggingInput) SetBucket(v string) *PutObjectTaggingInput { return s } +func (s *PutObjectTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetKey sets the Key field's value. func (s *PutObjectTaggingInput) SetKey(v string) *PutObjectTaggingInput { s.Key = &v @@ -16117,10 +18931,13 @@ func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationCo return s } +// Container for information about a particular replication rule. // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationRule type ReplicationRule struct { _ struct{} `type:"structure"` + // Container for replication destination information. + // // Destination is a required field Destination *Destination `type:"structure" required:"true"` @@ -16134,6 +18951,9 @@ type ReplicationRule struct { // Prefix is a required field Prefix *string `type:"string" required:"true"` + // Container for filters that define which source objects should be replicated. + SourceSelectionCriteria *SourceSelectionCriteria `type:"structure"` + // The rule is ignored if status is not Enabled. // // Status is a required field @@ -16167,6 +18987,11 @@ func (s *ReplicationRule) Validate() error { invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) } } + if s.SourceSelectionCriteria != nil { + if err := s.SourceSelectionCriteria.Validate(); err != nil { + invalidParams.AddNested("SourceSelectionCriteria", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -16192,6 +19017,12 @@ func (s *ReplicationRule) SetPrefix(v string) *ReplicationRule { return s } +// SetSourceSelectionCriteria sets the SourceSelectionCriteria field's value. +func (s *ReplicationRule) SetSourceSelectionCriteria(v *SourceSelectionCriteria) *ReplicationRule { + s.SourceSelectionCriteria = v + return s +} + // SetStatus sets the Status field's value. func (s *ReplicationRule) SetStatus(v string) *ReplicationRule { s.Status = &v @@ -16253,7 +19084,8 @@ type RestoreObjectInput struct { // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure"` + // Container for restore job parameters. + RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } @@ -16298,6 +19130,13 @@ func (s *RestoreObjectInput) SetBucket(v string) *RestoreObjectInput { return s } +func (s *RestoreObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetKey sets the Key field's value. 
func (s *RestoreObjectInput) SetKey(v string) *RestoreObjectInput { s.Key = &v @@ -16329,6 +19168,10 @@ type RestoreObjectOutput struct { // If present, indicates that the requester was successfully charged for the // request. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Indicates the path in the provided S3 output location where Select results + // will be restored to. + RestoreOutputPath *string `location:"header" locationName:"x-amz-restore-output-path" type:"string"` } // String returns the string representation @@ -16347,17 +19190,39 @@ func (s *RestoreObjectOutput) SetRequestCharged(v string) *RestoreObjectOutput { return s } +// SetRestoreOutputPath sets the RestoreOutputPath field's value. +func (s *RestoreObjectOutput) SetRestoreOutputPath(v string) *RestoreObjectOutput { + s.RestoreOutputPath = &v + return s +} + +// Container for restore job parameters. // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreRequest type RestoreRequest struct { _ struct{} `type:"structure"` - // Lifetime of the active copy in days - // - // Days is a required field - Days *int64 `type:"integer" required:"true"` + // Lifetime of the active copy in days. Do not use with restores that specify + // OutputLocation. + Days *int64 `type:"integer"` - // Glacier related prameters pertaining to this job. + // The optional description for the job. + Description *string `type:"string"` + + // Glacier related parameters pertaining to this job. Do not use with restores + // that specify OutputLocation. GlacierJobParameters *GlacierJobParameters `type:"structure"` + + // Describes the location where the restore job's output is stored. + OutputLocation *OutputLocation `type:"structure"` + + // Describes the parameters for Select job types. + SelectParameters *SelectParameters `type:"structure"` + + // Glacier retrieval tier at which the restore will be processed. + Tier *string `type:"string" enum:"Tier"` + + // Type of restore request. + Type *string `type:"string" enum:"RestoreRequestType"` } // String returns the string representation @@ -16373,14 +19238,21 @@ func (s RestoreRequest) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *RestoreRequest) Validate() error { invalidParams := request.ErrInvalidParams{Context: "RestoreRequest"} - if s.Days == nil { - invalidParams.Add(request.NewErrParamRequired("Days")) - } if s.GlacierJobParameters != nil { if err := s.GlacierJobParameters.Validate(); err != nil { invalidParams.AddNested("GlacierJobParameters", err.(request.ErrInvalidParams)) } } + if s.OutputLocation != nil { + if err := s.OutputLocation.Validate(); err != nil { + invalidParams.AddNested("OutputLocation", err.(request.ErrInvalidParams)) + } + } + if s.SelectParameters != nil { + if err := s.SelectParameters.Validate(); err != nil { + invalidParams.AddNested("SelectParameters", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -16394,12 +19266,42 @@ func (s *RestoreRequest) SetDays(v int64) *RestoreRequest { return s } +// SetDescription sets the Description field's value. +func (s *RestoreRequest) SetDescription(v string) *RestoreRequest { + s.Description = &v + return s +} + // SetGlacierJobParameters sets the GlacierJobParameters field's value. 
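`RestoreRequest` above drops the required constraint on `Days` (restores that specify `OutputLocation` must omit it) and adds `Description`, `Tier`, `Type`, `OutputLocation`, and `SelectParameters`. A classic Glacier restore still sets `Days`; a sketch, assuming the pre-existing `Tier` enum value `s3.TierStandard`:

```
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	restore := &s3.RestoreRequest{}
	restore.SetDays(7) // still used for plain Glacier restores
	restore.SetGlacierJobParameters(
		(&s3.GlacierJobParameters{}).SetTier(s3.TierStandard),
	)

	input := &s3.RestoreObjectInput{}
	input.SetBucket("example-bucket")
	input.SetKey("archived/object")
	input.SetRestoreRequest(restore)

	if _, err := svc.RestoreObject(input); err != nil {
		fmt.Println("restore failed:", err)
	}
}
```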
func (s *RestoreRequest) SetGlacierJobParameters(v *GlacierJobParameters) *RestoreRequest { s.GlacierJobParameters = v return s } +// SetOutputLocation sets the OutputLocation field's value. +func (s *RestoreRequest) SetOutputLocation(v *OutputLocation) *RestoreRequest { + s.OutputLocation = v + return s +} + +// SetSelectParameters sets the SelectParameters field's value. +func (s *RestoreRequest) SetSelectParameters(v *SelectParameters) *RestoreRequest { + s.SelectParameters = v + return s +} + +// SetTier sets the Tier field's value. +func (s *RestoreRequest) SetTier(v string) *RestoreRequest { + s.Tier = &v + return s +} + +// SetType sets the Type field's value. +func (s *RestoreRequest) SetType(v string) *RestoreRequest { + s.Type = &v + return s +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RoutingRule type RoutingRule struct { _ struct{} `type:"structure"` @@ -16568,6 +19470,373 @@ func (s *Rule) SetTransition(v *Transition) *Rule { return s } +// Specifies the use of SSE-KMS to encrypt delievered Inventory reports. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SSEKMS +type SSEKMS struct { + _ struct{} `locationName:"SSE-KMS" type:"structure"` + + // Specifies the ID of the AWS Key Management Service (KMS) master encryption + // key to use for encrypting Inventory reports. + // + // KeyId is a required field + KeyId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SSEKMS) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SSEKMS) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SSEKMS) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SSEKMS"} + if s.KeyId == nil { + invalidParams.Add(request.NewErrParamRequired("KeyId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKeyId sets the KeyId field's value. +func (s *SSEKMS) SetKeyId(v string) *SSEKMS { + s.KeyId = &v + return s +} + +// Specifies the use of SSE-S3 to encrypt delievered Inventory reports. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SSES3 +type SSES3 struct { + _ struct{} `locationName:"SSE-S3" type:"structure"` +} + +// String returns the string representation +func (s SSES3) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SSES3) GoString() string { + return s.String() +} + +// Describes the parameters for Select job types. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectParameters +type SelectParameters struct { + _ struct{} `type:"structure"` + + // The expression that is used to query the object. + // + // Expression is a required field + Expression *string `type:"string" required:"true"` + + // The type of the provided expression (e.g., SQL). + // + // ExpressionType is a required field + ExpressionType *string `type:"string" required:"true" enum:"ExpressionType"` + + // Describes the serialization format of the object. + // + // InputSerialization is a required field + InputSerialization *InputSerialization `type:"structure" required:"true"` + + // Describes how the results of the Select job are serialized. 
+ // + // OutputSerialization is a required field + OutputSerialization *OutputSerialization `type:"structure" required:"true"` +} + +// String returns the string representation +func (s SelectParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SelectParameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SelectParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SelectParameters"} + if s.Expression == nil { + invalidParams.Add(request.NewErrParamRequired("Expression")) + } + if s.ExpressionType == nil { + invalidParams.Add(request.NewErrParamRequired("ExpressionType")) + } + if s.InputSerialization == nil { + invalidParams.Add(request.NewErrParamRequired("InputSerialization")) + } + if s.OutputSerialization == nil { + invalidParams.Add(request.NewErrParamRequired("OutputSerialization")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetExpression sets the Expression field's value. +func (s *SelectParameters) SetExpression(v string) *SelectParameters { + s.Expression = &v + return s +} + +// SetExpressionType sets the ExpressionType field's value. +func (s *SelectParameters) SetExpressionType(v string) *SelectParameters { + s.ExpressionType = &v + return s +} + +// SetInputSerialization sets the InputSerialization field's value. +func (s *SelectParameters) SetInputSerialization(v *InputSerialization) *SelectParameters { + s.InputSerialization = v + return s +} + +// SetOutputSerialization sets the OutputSerialization field's value. +func (s *SelectParameters) SetOutputSerialization(v *OutputSerialization) *SelectParameters { + s.OutputSerialization = v + return s +} + +// Describes the default server-side encryption to apply to new objects in the +// bucket. If Put Object request does not specify any server-side encryption, +// this default encryption will be applied. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ServerSideEncryptionByDefault +type ServerSideEncryptionByDefault struct { + _ struct{} `type:"structure"` + + // KMS master key ID to use for the default encryption. This parameter is allowed + // if SSEAlgorithm is aws:kms. + KMSMasterKeyID *string `type:"string"` + + // Server-side encryption algorithm to use for the default encryption. + // + // SSEAlgorithm is a required field + SSEAlgorithm *string `type:"string" required:"true" enum:"ServerSideEncryption"` +} + +// String returns the string representation +func (s ServerSideEncryptionByDefault) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServerSideEncryptionByDefault) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ServerSideEncryptionByDefault) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionByDefault"} + if s.SSEAlgorithm == nil { + invalidParams.Add(request.NewErrParamRequired("SSEAlgorithm")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKMSMasterKeyID sets the KMSMasterKeyID field's value. +func (s *ServerSideEncryptionByDefault) SetKMSMasterKeyID(v string) *ServerSideEncryptionByDefault { + s.KMSMasterKeyID = &v + return s +} + +// SetSSEAlgorithm sets the SSEAlgorithm field's value. 
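`SelectParameters` ties the new SELECT restore type to the serialization types added earlier in this patch. A sketch of a SELECT-type restore; the `FileHeaderInfo` field on `CSVInput` is assumed (that type is defined in a part of the patch not shown here), and the expression and bucket names are illustrative:

```
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sel := &s3.SelectParameters{}
	sel.SetExpression(`SELECT s.name FROM S3Object s`)
	sel.SetExpressionType(s3.ExpressionTypeSql)
	sel.SetInputSerialization(&s3.InputSerialization{
		// FileHeaderInfo is assumed here, named after the enum below.
		CSV: &s3.CSVInput{FileHeaderInfo: aws.String(s3.FileHeaderInfoUse)},
	})
	sel.SetOutputSerialization(&s3.OutputSerialization{
		CSV: &s3.CSVOutput{},
	})

	restore := &s3.RestoreRequest{}
	restore.SetType(s3.RestoreRequestTypeSelect)
	restore.SetSelectParameters(sel)
	// Results can be directed to an S3 location via OutputLocation.
	restore.SetOutputLocation((&s3.OutputLocation{}).SetS3(
		(&s3.Location{}).SetBucketName("example-results-bucket").SetPrefix("select/"),
	))
	// Hand restore to RestoreObjectInput.SetRestoreRequest as in the
	// earlier sketch.
}
```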
+func (s *ServerSideEncryptionByDefault) SetSSEAlgorithm(v string) *ServerSideEncryptionByDefault { + s.SSEAlgorithm = &v + return s +} + +// Container for server-side encryption configuration rules. Currently S3 supports +// one rule only. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ServerSideEncryptionConfiguration +type ServerSideEncryptionConfiguration struct { + _ struct{} `type:"structure"` + + // Container for information about a particular server-side encryption configuration + // rule. + // + // Rules is a required field + Rules []*ServerSideEncryptionRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s ServerSideEncryptionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServerSideEncryptionConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ServerSideEncryptionConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionConfiguration"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRules sets the Rules field's value. +func (s *ServerSideEncryptionConfiguration) SetRules(v []*ServerSideEncryptionRule) *ServerSideEncryptionConfiguration { + s.Rules = v + return s +} + +// Container for information about a particular server-side encryption configuration +// rule. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ServerSideEncryptionRule +type ServerSideEncryptionRule struct { + _ struct{} `type:"structure"` + + // Describes the default server-side encryption to apply to new objects in the + // bucket. If Put Object request does not specify any server-side encryption, + // this default encryption will be applied. + ApplyServerSideEncryptionByDefault *ServerSideEncryptionByDefault `type:"structure"` +} + +// String returns the string representation +func (s ServerSideEncryptionRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServerSideEncryptionRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ServerSideEncryptionRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionRule"} + if s.ApplyServerSideEncryptionByDefault != nil { + if err := s.ApplyServerSideEncryptionByDefault.Validate(); err != nil { + invalidParams.AddNested("ApplyServerSideEncryptionByDefault", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetApplyServerSideEncryptionByDefault sets the ApplyServerSideEncryptionByDefault field's value. +func (s *ServerSideEncryptionRule) SetApplyServerSideEncryptionByDefault(v *ServerSideEncryptionByDefault) *ServerSideEncryptionRule { + s.ApplyServerSideEncryptionByDefault = v + return s +} + +// Container for filters that define which source objects should be replicated. 
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SourceSelectionCriteria +type SourceSelectionCriteria struct { + _ struct{} `type:"structure"` + + // Container for filter information of selection of KMS Encrypted S3 objects. + SseKmsEncryptedObjects *SseKmsEncryptedObjects `type:"structure"` +} + +// String returns the string representation +func (s SourceSelectionCriteria) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SourceSelectionCriteria) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SourceSelectionCriteria) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SourceSelectionCriteria"} + if s.SseKmsEncryptedObjects != nil { + if err := s.SseKmsEncryptedObjects.Validate(); err != nil { + invalidParams.AddNested("SseKmsEncryptedObjects", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSseKmsEncryptedObjects sets the SseKmsEncryptedObjects field's value. +func (s *SourceSelectionCriteria) SetSseKmsEncryptedObjects(v *SseKmsEncryptedObjects) *SourceSelectionCriteria { + s.SseKmsEncryptedObjects = v + return s +} + +// Container for filter information of selection of KMS Encrypted S3 objects. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SseKmsEncryptedObjects +type SseKmsEncryptedObjects struct { + _ struct{} `type:"structure"` + + // The replication for KMS encrypted S3 objects is disabled if status is not + // Enabled. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"SseKmsEncryptedObjectsStatus"` +} + +// String returns the string representation +func (s SseKmsEncryptedObjects) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SseKmsEncryptedObjects) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SseKmsEncryptedObjects) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SseKmsEncryptedObjects"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStatus sets the Status field's value. +func (s *SseKmsEncryptedObjects) SetStatus(v string) *SseKmsEncryptedObjects { + s.Status = &v + return s +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/StorageClassAnalysis type StorageClassAnalysis struct { _ struct{} `type:"structure"` @@ -16773,7 +20042,7 @@ func (s *Tagging) SetTagSet(v []*Tag) *Tagging { type TargetGrant struct { _ struct{} `type:"structure"` - Grantee *Grantee `type:"structure"` + Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` // Logging permissions assigned to the Grantee for the bucket. Permission *string `type:"string" enum:"BucketLogsPermission"` @@ -17113,6 +20382,13 @@ func (s *UploadPartCopyInput) SetBucket(v string) *UploadPartCopyInput { return s } +func (s *UploadPartCopyInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetCopySource sets the CopySource field's value. 
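`SourceSelectionCriteria` and `SseKmsEncryptedObjects` above slot into `ReplicationRule` through the `SetSourceSelectionCriteria` hunk earlier in this patch. A sketch of a rule that also replicates SSE-KMS-encrypted objects (per the field docs, such objects are skipped unless `Status` is `Enabled`); the destination ARN is a placeholder, and a real configuration would likely also name a replica KMS key, which is out of scope here:

```
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	rule := &s3.ReplicationRule{}
	rule.SetPrefix("") // empty prefix matches the whole bucket
	rule.SetStatus(s3.ReplicationRuleStatusEnabled)
	rule.SetDestination(&s3.Destination{
		Bucket: aws.String("arn:aws:s3:::example-replica-bucket"),
	})
	rule.SetSourceSelectionCriteria(&s3.SourceSelectionCriteria{
		SseKmsEncryptedObjects: &s3.SseKmsEncryptedObjects{
			Status: aws.String(s3.SseKmsEncryptedObjectsStatusEnabled),
		},
	})

	if err := rule.Validate(); err != nil {
		fmt.Println("invalid rule:", err)
	}
}
```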
func (s *UploadPartCopyInput) SetCopySource(v string) *UploadPartCopyInput { s.CopySource = &v @@ -17161,6 +20437,13 @@ func (s *UploadPartCopyInput) SetCopySourceSSECustomerKey(v string) *UploadPartC return s } +func (s *UploadPartCopyInput) getCopySourceSSECustomerKey() (v string) { + if s.CopySourceSSECustomerKey == nil { + return v + } + return *s.CopySourceSSECustomerKey +} + // SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value. func (s *UploadPartCopyInput) SetCopySourceSSECustomerKeyMD5(v string) *UploadPartCopyInput { s.CopySourceSSECustomerKeyMD5 = &v @@ -17197,6 +20480,13 @@ func (s *UploadPartCopyInput) SetSSECustomerKey(v string) *UploadPartCopyInput { return s } +func (s *UploadPartCopyInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + // SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. func (s *UploadPartCopyInput) SetSSECustomerKeyMD5(v string) *UploadPartCopyInput { s.SSECustomerKeyMD5 = &v @@ -17310,6 +20600,9 @@ type UploadPartInput struct { // body cannot be determined automatically. ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + // The base64-encoded 128-bit MD5 digest of the part data. + ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` + // Object key for which the multipart upload was initiated. // // Key is a required field @@ -17396,12 +20689,25 @@ func (s *UploadPartInput) SetBucket(v string) *UploadPartInput { return s } +func (s *UploadPartInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetContentLength sets the ContentLength field's value. func (s *UploadPartInput) SetContentLength(v int64) *UploadPartInput { s.ContentLength = &v return s } +// SetContentMD5 sets the ContentMD5 field's value. +func (s *UploadPartInput) SetContentMD5(v string) *UploadPartInput { + s.ContentMD5 = &v + return s +} + // SetKey sets the Key field's value. func (s *UploadPartInput) SetKey(v string) *UploadPartInput { s.Key = &v @@ -17432,6 +20738,13 @@ func (s *UploadPartInput) SetSSECustomerKey(v string) *UploadPartInput { return s } +func (s *UploadPartInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + // SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. 
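The new `ContentMD5` field on `UploadPartInput` carries the base64-encoded 128-bit MD5 digest of the part body, which lets S3 verify the part on receipt. A hedged sketch of filling it in, assuming the generated setters shown in this diff; the bucket, key, and upload ID values are placeholders:

```go
package main

import (
	"bytes"
	"crypto/md5"
	"encoding/base64"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// partMD5 returns the base64-encoded MD5 digest S3 expects in Content-MD5.
func partMD5(part []byte) string {
	sum := md5.Sum(part)
	return base64.StdEncoding.EncodeToString(sum[:])
}

func main() {
	part := []byte("example part data")
	input := (&s3.UploadPartInput{}).
		SetBucket("my-bucket").      // placeholder
		SetKey("my-key").            // placeholder
		SetUploadId("my-upload-id"). // placeholder
		SetPartNumber(1).
		SetBody(bytes.NewReader(part)).
		SetContentMD5(partMD5(part))

	fmt.Println("Content-MD5:", aws.StringValue(input.ContentMD5))
}
```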
func (s *UploadPartInput) SetSSECustomerKeyMD5(v string) *UploadPartInput { s.SSECustomerKeyMD5 = &v @@ -17767,6 +21080,22 @@ const ( ExpirationStatusDisabled = "Disabled" ) +const ( + // ExpressionTypeSql is a ExpressionType enum value + ExpressionTypeSql = "SQL" +) + +const ( + // FileHeaderInfoUse is a FileHeaderInfo enum value + FileHeaderInfoUse = "USE" + + // FileHeaderInfoIgnore is a FileHeaderInfo enum value + FileHeaderInfoIgnore = "IGNORE" + + // FileHeaderInfoNone is a FileHeaderInfo enum value + FileHeaderInfoNone = "NONE" +) + const ( // FilterRuleNamePrefix is a FilterRuleName enum value FilterRuleNamePrefix = "prefix" @@ -17778,6 +21107,9 @@ const ( const ( // InventoryFormatCsv is a InventoryFormat enum value InventoryFormatCsv = "CSV" + + // InventoryFormatOrc is a InventoryFormat enum value + InventoryFormatOrc = "ORC" ) const ( @@ -17814,6 +21146,9 @@ const ( // InventoryOptionalFieldReplicationStatus is a InventoryOptionalField enum value InventoryOptionalFieldReplicationStatus = "ReplicationStatus" + + // InventoryOptionalFieldEncryptionStatus is a InventoryOptionalField enum value + InventoryOptionalFieldEncryptionStatus = "EncryptionStatus" ) const ( @@ -17879,6 +21214,11 @@ const ( ObjectVersionStorageClassStandard = "STANDARD" ) +const ( + // OwnerOverrideDestination is a OwnerOverride enum value + OwnerOverrideDestination = "Destination" +) + const ( // PayerRequester is a Payer enum value PayerRequester = "Requester" @@ -17912,6 +21252,14 @@ const ( ProtocolHttps = "https" ) +const ( + // QuoteFieldsAlways is a QuoteFields enum value + QuoteFieldsAlways = "ALWAYS" + + // QuoteFieldsAsneeded is a QuoteFields enum value + QuoteFieldsAsneeded = "ASNEEDED" +) + const ( // ReplicationRuleStatusEnabled is a ReplicationRuleStatus enum value ReplicationRuleStatusEnabled = "Enabled" @@ -17950,6 +21298,11 @@ const ( RequestPayerRequester = "requester" ) +const ( + // RestoreRequestTypeSelect is a RestoreRequestType enum value + RestoreRequestTypeSelect = "SELECT" +) + const ( // ServerSideEncryptionAes256 is a ServerSideEncryption enum value ServerSideEncryptionAes256 = "AES256" @@ -17958,6 +21311,14 @@ const ( ServerSideEncryptionAwsKms = "aws:kms" ) +const ( + // SseKmsEncryptedObjectsStatusEnabled is a SseKmsEncryptedObjectsStatus enum value + SseKmsEncryptedObjectsStatusEnabled = "Enabled" + + // SseKmsEncryptedObjectsStatusDisabled is a SseKmsEncryptedObjectsStatus enum value + SseKmsEncryptedObjectsStatusDisabled = "Disabled" +) + const ( // StorageClassStandard is a StorageClass enum value StorageClassStandard = "STANDARD" diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go index c3a2702d..bc68a46a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go @@ -12,6 +12,69 @@ import ( var reBucketLocation = regexp.MustCompile(`>([^<>]+)<\/Location`) +// NormalizeBucketLocation is a utility function which will update the +// passed in value to always be a region ID. Generally this would be used +// with GetBucketLocation API operation. +// +// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html +// for more information on the values that can be returned. 
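A minimal sketch of applying this helper to a GetBucketLocation result, assuming a `*s3.S3` client configured elsewhere; the bucket name is supplied by the caller:

```go
import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// bucketRegion resolves a bucket's region ID, folding the legacy
// "" and "EU" location values into "us-east-1" and "eu-west-1".
func bucketRegion(svc *s3.S3, bucket string) (string, error) {
	out, err := svc.GetBucketLocation(&s3.GetBucketLocationInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return "", err
	}
	return s3.NormalizeBucketLocation(aws.StringValue(out.LocationConstraint)), nil
}
```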
+func NormalizeBucketLocation(loc string) string { + switch loc { + case "": + loc = "us-east-1" + case "EU": + loc = "eu-west-1" + } + + return loc +} + +// NormalizeBucketLocationHandler is a request handler which will update the +// GetBucketLocation's result LocationConstraint value to always be a region ID. +// +// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html +// for more information on the values that can be returned. +// +// req, result := svc.GetBucketLocationRequest(&s3.GetBucketLocationInput{ +// Bucket: aws.String(bucket), +// }) +// req.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler) +// err := req.Send() +var NormalizeBucketLocationHandler = request.NamedHandler{ + Name: "awssdk.s3.NormalizeBucketLocation", + Fn: func(req *request.Request) { + if req.Error != nil { + return + } + + out := req.Data.(*GetBucketLocationOutput) + loc := NormalizeBucketLocation(aws.StringValue(out.LocationConstraint)) + out.LocationConstraint = aws.String(loc) + }, +} + +// WithNormalizeBucketLocation is a request option which will update the +// GetBucketLocation's result LocationConstraint value to always be a region ID. +// +// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html +// for more information on the values that can be returned. +// +// result, err := svc.GetBucketLocationWithContext(ctx, +// &s3.GetBucketLocationInput{ +// Bucket: aws.String(bucket), +// }, +// s3.WithNormalizeBucketLocation, +// ) +func WithNormalizeBucketLocation(r *request.Request) { + r.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler) +} + func buildGetBucketLocation(r *request.Request) { if r.DataFilled() { out := r.Data.(*GetBucketLocationOutput) @@ -24,7 +87,7 @@ func buildGetBucketLocation(r *request.Request) { match := reBucketLocation.FindSubmatch(b) if len(match) > 1 { loc := string(match[1]) - out.LocationConstraint = &loc + out.LocationConstraint = aws.String(loc) } } } diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go index 84633472..899d5e8d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go @@ -44,3 +44,21 @@ func defaultInitRequestFn(r *request.Request) { r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError) } } + +// bucketGetter is an accessor interface to grab the "Bucket" field from +// an S3 type. +type bucketGetter interface { + getBucket() string +} + +// sseCustomerKeyGetter is an accessor interface to grab the "SSECustomerKey" +// field from an S3 type. +type sseCustomerKeyGetter interface { + getSSECustomerKey() string +} + +// copySourceSSECustomerKeyGetter is an accessor interface to grab the +// "CopySourceSSECustomerKey" field from an S3 type. +type copySourceSSECustomerKeyGetter interface { + getCopySourceSSECustomerKey() string +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go new file mode 100644 index 00000000..0def0225 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go @@ -0,0 +1,26 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package s3 provides the client and types for making API +// requests to Amazon Simple Storage Service. 
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01 for more information on this service.
+//
+// See s3 package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/
+//
+// Using the Client
+//
+// To contact Amazon Simple Storage Service with the SDK use the New function to create
+// a new service client. With that client you can make API requests to the service.
+// These clients are safe to use concurrently.
+//
+// See the SDK's documentation for more information on how to use the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws.Config documentation for more information on configuring SDK clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the Amazon Simple Storage Service client S3 for more
+// information on creating client for this service.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/#New
+package s3
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go
new file mode 100644
index 00000000..39b912c2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go
@@ -0,0 +1,109 @@
+// Upload Managers
+//
+// The s3manager package's Uploader provides concurrent upload of content to S3
+// by taking advantage of S3's Multipart APIs. The Uploader supports io.Reader
+// for streaming uploads, and will take advantage of io.ReadSeeker
+// for optimizations if the Body satisfies that type. Once the Uploader instance
+// is created you can call Upload concurrently from multiple goroutines safely.
+//
+//    // The session the S3 Uploader will use
+//    sess := session.Must(session.NewSession())
+//
+//    // Create an uploader with the session and default options
+//    uploader := s3manager.NewUploader(sess)
+//
+//    f, err := os.Open(filename)
+//    if err != nil {
+//        return fmt.Errorf("failed to open file %q, %v", filename, err)
+//    }
+//
+//    // Upload the file to S3.
+//    result, err := uploader.Upload(&s3manager.UploadInput{
+//        Bucket: aws.String(myBucket),
+//        Key:    aws.String(myString),
+//        Body:   f,
+//    })
+//    if err != nil {
+//        return fmt.Errorf("failed to upload file, %v", err)
+//    }
+//    fmt.Printf("file uploaded to, %s\n", aws.StringValue(result.Location))
+//
+// See the s3manager package's Uploader type documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Uploader
+//
+// Download Manager
+//
+// The s3manager package's Downloader provides concurrent downloading of objects
+// from S3. The Downloader will write S3 Object content with an io.WriterAt.
+// Once the Downloader instance is created you can call Download concurrently from
+// multiple goroutines safely.
+//
+//    // The session the S3 Downloader will use
+//    sess := session.Must(session.NewSession())
+//
+//    // Create a downloader with the session and default options
+//    downloader := s3manager.NewDownloader(sess)
+//
+//    // Create a file to write the S3 Object contents to.
+//    f, err := os.Create(filename)
+//    if err != nil {
+//        return fmt.Errorf("failed to create file %q, %v", filename, err)
+//    }
+//
+//    // Write the contents of S3 Object to the file
+//    n, err := downloader.Download(f, &s3.GetObjectInput{
+//        Bucket: aws.String(myBucket),
+//        Key:    aws.String(myString),
+//    })
+//    if err != nil {
+//        return fmt.Errorf("failed to download file, %v", err)
+//    }
+//    fmt.Printf("file downloaded, %d bytes\n", n)
+//
+// See the s3manager package's Downloader type documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Downloader
+//
+// Get Bucket Region
+//
+// GetBucketRegion will attempt to get the region for a bucket using a region
+// hint to determine which AWS partition to perform the query on. Use this utility
+// to determine the region a bucket is in.
+//
+//    sess := session.Must(session.NewSession())
+//
+//    bucket := "my-bucket"
+//    region, err := s3manager.GetBucketRegion(ctx, sess, bucket, "us-west-2")
+//    if err != nil {
+//        if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" {
+//            fmt.Fprintf(os.Stderr, "unable to find bucket %s's region\n", bucket)
+//        }
+//        return err
+//    }
+//    fmt.Printf("Bucket %s is in %s region\n", bucket, region)
+//
+// See the s3manager package's GetBucketRegion function documentation for more information
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#GetBucketRegion
+//
+// S3 Crypto Client
+//
+// The s3crypto package provides the tools to upload and download encrypted
+// content from S3. The Encryption and Decryption clients can be used concurrently
+// once the client is created.
+//
+//    sess := session.Must(session.NewSession())
+//
+//    // Create the decryption client.
+//    svc := s3crypto.NewDecryptionClient(sess)
+//
+//    // The object will be downloaded from S3 and decrypted locally. Metadata
+//    // about the object's encryption will instruct the decryption client how to
+//    // decrypt the content of the object. By default KMS is used for keys.
+//    result, err := svc.GetObject(&s3.GetObjectInput {
+//        Bucket: aws.String(myBucket),
+//        Key: aws.String(myKey),
+//    })
+//
+// See the s3crypto package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3crypto/
+//
+package s3
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go
new file mode 100644
index 00000000..931cb17b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go
@@ -0,0 +1,48 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package s3
+
+const (
+
+	// ErrCodeBucketAlreadyExists for service response error code
+	// "BucketAlreadyExists".
+	//
+	// The requested bucket name is not available. The bucket namespace is shared
+	// by all users of the system. Please select a different name and try again.
+	ErrCodeBucketAlreadyExists = "BucketAlreadyExists"
+
+	// ErrCodeBucketAlreadyOwnedByYou for service response error code
+	// "BucketAlreadyOwnedByYou".
+	ErrCodeBucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou"
+
+	// ErrCodeNoSuchBucket for service response error code
+	// "NoSuchBucket".
+	//
+	// The specified bucket does not exist.
+	ErrCodeNoSuchBucket = "NoSuchBucket"
+
+	// ErrCodeNoSuchKey for service response error code
+	// "NoSuchKey".
+	//
+	// The specified key does not exist.
+	ErrCodeNoSuchKey = "NoSuchKey"
+
+	// ErrCodeNoSuchUpload for service response error code
+	// "NoSuchUpload".
+	//
+	// The specified multipart upload does not exist.
+	ErrCodeNoSuchUpload = "NoSuchUpload"
+
+	// ErrCodeObjectAlreadyInActiveTierError for service response error code
+	// "ObjectAlreadyInActiveTierError".
+	//
+	// This operation is not allowed against this storage tier.
+	ErrCodeObjectAlreadyInActiveTierError = "ObjectAlreadyInActiveTierError"
+
+	// ErrCodeObjectNotInActiveTierError for service response error code
+	// "ObjectNotInActiveTierError".
+	//
+	// The source object of the COPY operation is not in the active tier and is
+	// only stored in Amazon Glacier.
+	ErrCodeObjectNotInActiveTierError = "ObjectNotInActiveTierError"
+)
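These generated constants pair with `awserr.Error` for branching on S3 service failures; a hedged sketch, following the `aerr.Code()` idiom already used in the package docs above (the bucket and key names are placeholders):

```go
import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/s3"
)

// fetchObject treats a missing key as a non-fatal outcome and
// propagates every other failure unchanged.
func fetchObject(svc *s3.S3) error {
	_, err := svc.GetObject(&s3.GetObjectInput{
		Bucket: aws.String("my-bucket"), // placeholder
		Key:    aws.String("my-key"),    // placeholder
	})
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeNoSuchKey {
		fmt.Println("object does not exist")
		return nil
	}
	return err
}
```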
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
index ec3ffe44..a7fbc2de 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
@@ -8,7 +8,6 @@ import (
 
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/awserr"
-	"github.com/aws/aws-sdk-go/aws/awsutil"
 	"github.com/aws/aws-sdk-go/aws/request"
 )
 
@@ -113,15 +112,9 @@ func updateEndpointForAccelerate(r *request.Request) {
 // Attempts to retrieve the bucket name from the request input parameters.
 // If no bucket is found, or the field is empty "", false will be returned.
 func bucketNameFromReqParams(params interface{}) (string, bool) {
-	b, _ := awsutil.ValuesAtPath(params, "Bucket")
-	if len(b) == 0 {
-		return "", false
-	}
-
-	if bucket, ok := b[0].(*string); ok {
-		if bucketStr := aws.StringValue(bucket); bucketStr != "" {
-			return bucketStr, true
-		}
+	if iface, ok := params.(bucketGetter); ok {
+		b := iface.getBucket()
+		return b, len(b) > 0
 	}
 
 	return "", false
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
index 5e6f2299..614e477d 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
@@ -1,4 +1,4 @@
-// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
 
 package s3
 
@@ -11,10 +11,12 @@ import (
 	"github.com/aws/aws-sdk-go/private/protocol/restxml"
 )
 
-// S3 is a client for Amazon S3.
-// The service client's operations are safe to be used concurrently.
-// It is not safe to mutate any of the client's properties though.
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01
+// S3 provides the API operation methods for making requests to
+// Amazon Simple Storage Service. See this package's package overview docs
+// for details on the service.
+//
+// S3 methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties though.
 type S3 struct {
 	*client.Client
 }
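The `bucketGetter` assertion in host_style_bucket.go above swaps the old reflection-based `awsutil.ValuesAtPath` lookup for a compile-time interface check against the generated accessors. The pattern in miniature, with hypothetical names standing in for the SDK's unexported types:

```go
package main

import "fmt"

// getter mirrors the unexported bucketGetter interface: any input type
// with a generated getBucket accessor satisfies it automatically.
type getter interface {
	getBucket() string
}

type putObjectParams struct{ bucket *string }

func (p *putObjectParams) getBucket() string {
	if p.bucket == nil {
		return ""
	}
	return *p.bucket
}

// bucketName replaces reflection with a single type assertion.
func bucketName(params interface{}) (string, bool) {
	if g, ok := params.(getter); ok {
		b := g.getBucket()
		return b, len(b) > 0
	}
	return "", false
}

func main() {
	b := "my-bucket"
	fmt.Println(bucketName(&putObjectParams{bucket: &b})) // my-bucket true
	fmt.Println(bucketName(struct{}{}))                   // "" false
}
```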
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go
index 268ea2fb..8010c4fa 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go
@@ -5,17 +5,27 @@ import (
 	"encoding/base64"
 
 	"github.com/aws/aws-sdk-go/aws/awserr"
-	"github.com/aws/aws-sdk-go/aws/awsutil"
 	"github.com/aws/aws-sdk-go/aws/request"
 )
 
 var errSSERequiresSSL = awserr.New("ConfigError", "cannot send SSE keys over HTTP.", nil)
 
 func validateSSERequiresSSL(r *request.Request) {
-	if r.HTTPRequest.URL.Scheme != "https" {
-		p, _ := awsutil.ValuesAtPath(r.Params, "SSECustomerKey||CopySourceSSECustomerKey")
-		if len(p) > 0 {
+	if r.HTTPRequest.URL.Scheme == "https" {
+		return
+	}
+
+	if iface, ok := r.Params.(sseCustomerKeyGetter); ok {
+		if len(iface.getSSECustomerKey()) > 0 {
 			r.Error = errSSERequiresSSL
+			return
+		}
+	}
+
+	if iface, ok := r.Params.(copySourceSSECustomerKeyGetter); ok {
+		if len(iface.getCopySourceSSECustomerKey()) > 0 {
+			r.Error = errSSERequiresSSL
+			return
 		}
 	}
 }
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
index ed91c587..bcca8627 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
@@ -23,17 +23,22 @@ func unmarshalError(r *request.Request) {
 	defer r.HTTPResponse.Body.Close()
 	defer io.Copy(ioutil.Discard, r.HTTPResponse.Body)
 
+	hostID := r.HTTPResponse.Header.Get("X-Amz-Id-2")
+
 	// Bucket exists in a different region, and request needs
 	// to be made to the correct region.
 	if r.HTTPResponse.StatusCode == http.StatusMovedPermanently {
-		r.Error = awserr.NewRequestFailure(
-			awserr.New("BucketRegionError",
-				fmt.Sprintf("incorrect region, the bucket is not in '%s' region",
-					aws.StringValue(r.Config.Region)),
-				nil),
-			r.HTTPResponse.StatusCode,
-			r.RequestID,
-		)
+		r.Error = requestFailure{
+			RequestFailure: awserr.NewRequestFailure(
+				awserr.New("BucketRegionError",
+					fmt.Sprintf("incorrect region, the bucket is not in '%s' region",
+						aws.StringValue(r.Config.Region)),
+					nil),
+				r.HTTPResponse.StatusCode,
+				r.RequestID,
+			),
+			hostID: hostID,
+		}
 		return
 	}
 
@@ -48,6 +53,7 @@
 	} else {
 		errCode = resp.Code
 		errMsg = resp.Message
+		err = nil
 	}
 
 	// Fallback to status code converted to message if still no error code
@@ -57,9 +63,41 @@
 		errMsg = statusText
 	}
 
-	r.Error = awserr.NewRequestFailure(
-		awserr.New(errCode, errMsg, nil),
-		r.HTTPResponse.StatusCode,
-		r.RequestID,
-	)
+	r.Error = requestFailure{
+		RequestFailure: awserr.NewRequestFailure(
+			awserr.New(errCode, errMsg, err),
+			r.HTTPResponse.StatusCode,
+			r.RequestID,
+		),
+		hostID: hostID,
+	}
+}
+
+// A RequestFailure provides access to the S3 Request ID and Host ID values
+// returned from API operation errors. Getting the error as a string will
+// return the formatted error with the same information as awserr.RequestFailure,
+// while also adding the HostID value from the response.
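Callers can recover the extra Host ID by asserting an operation error to the exported interface declared next; a hedged sketch:

```go
import (
	"log"

	"github.com/aws/aws-sdk-go/service/s3"
)

// logS3Failure surfaces the request and host IDs that S3 support asks for;
// err is whatever an S3 API call returned.
func logS3Failure(err error) {
	if rf, ok := err.(s3.RequestFailure); ok {
		log.Printf("S3 request failed: code=%s status=%d request-id=%s host-id=%s",
			rf.Code(), rf.StatusCode(), rf.RequestID(), rf.HostID())
	}
}
```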
+type RequestFailure interface { + awserr.RequestFailure + + // Host ID is the S3 Host ID needed for debug, and contacting support + HostID() string +} + +type requestFailure struct { + awserr.RequestFailure + + hostID string +} + +func (r requestFailure) Error() string { + extra := fmt.Sprintf("status code: %d, request id: %s, host id: %s", + r.StatusCode(), r.RequestID(), r.hostID) + return awserr.SprintError(r.Code(), r.Message(), extra, r.OrigErr()) +} +func (r requestFailure) String() string { + return r.Error() +} +func (r requestFailure) HostID() string { + return r.hostID } diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go index 5e16be4b..2596c694 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go @@ -1,139 +1,214 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. package s3 import ( - "github.com/aws/aws-sdk-go/private/waiter" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" ) // WaitUntilBucketExists uses the Amazon S3 API operation // HeadBucket to wait for a condition to be met before returning. -// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error { - waiterCfg := waiter.Config{ - Operation: "HeadBucket", - Delay: 5, + return c.WaitUntilBucketExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilBucketExistsWithContext is an extended version of WaitUntilBucketExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) WaitUntilBucketExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilBucketExists", MaxAttempts: 20, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "status", - Argument: "", + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, Expected: 200, }, { - State: "success", - Matcher: "status", - Argument: "", + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, Expected: 301, }, { - State: "success", - Matcher: "status", - Argument: "", + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, Expected: 403, }, { - State: "retry", - Matcher: "status", - Argument: "", + State: request.RetryWaiterState, + Matcher: request.StatusWaiterMatch, Expected: 404, }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadBucketInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadBucketRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) 
- w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } // WaitUntilBucketNotExists uses the Amazon S3 API operation // HeadBucket to wait for a condition to be met before returning. -// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error { - waiterCfg := waiter.Config{ - Operation: "HeadBucket", - Delay: 5, + return c.WaitUntilBucketNotExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilBucketNotExistsWithContext is an extended version of WaitUntilBucketNotExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) WaitUntilBucketNotExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilBucketNotExists", MaxAttempts: 20, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "status", - Argument: "", + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, Expected: 404, }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadBucketInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadBucketRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } // WaitUntilObjectExists uses the Amazon S3 API operation // HeadObject to wait for a condition to be met before returning. -// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error { - waiterCfg := waiter.Config{ - Operation: "HeadObject", - Delay: 5, + return c.WaitUntilObjectExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilObjectExistsWithContext is an extended version of WaitUntilObjectExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
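These waiters poll the Head operation on a constant five-second delay for up to 20 attempts. A hedged usage sketch for the context-aware variant (the bucket and key are placeholders; assumes a configured `*s3.S3` client):

```go
import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// waitForObject blocks until the object exists, or fails once the
// two-minute deadline or the waiter's 20-attempt budget is exhausted.
func waitForObject(svc *s3.S3, bucket, key string) error {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()

	return svc.WaitUntilObjectExistsWithContext(ctx, &s3.HeadObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
}
```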
+func (c *S3) WaitUntilObjectExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilObjectExists", MaxAttempts: 20, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "status", - Argument: "", + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, Expected: 200, }, { - State: "retry", - Matcher: "status", - Argument: "", + State: request.RetryWaiterState, + Matcher: request.StatusWaiterMatch, Expected: 404, }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadObjectInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadObjectRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } // WaitUntilObjectNotExists uses the Amazon S3 API operation // HeadObject to wait for a condition to be met before returning. -// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. func (c *S3) WaitUntilObjectNotExists(input *HeadObjectInput) error { - waiterCfg := waiter.Config{ - Operation: "HeadObject", - Delay: 5, + return c.WaitUntilObjectNotExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilObjectNotExistsWithContext is an extended version of WaitUntilObjectNotExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) WaitUntilObjectNotExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilObjectNotExists", MaxAttempts: 20, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "status", - Argument: "", + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, Expected: 404, }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadObjectInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadObjectRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go index 19c57ed6..3b8be437 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -1,11 +1,11 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. -// Package sts provides a client for AWS Security Token Service. 
 package sts
 
 import (
 	"time"
 
+	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/awsutil"
 	"github.com/aws/aws-sdk-go/aws/request"
 )
 
@@ -14,19 +14,18 @@ const opAssumeRole = "AssumeRole"
 
 // AssumeRoleRequest generates a "aws/request.Request" representing the
 // client's request for the AssumeRole operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
 //
-// See AssumeRole for usage and error information.
+// Use "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
 //
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the AssumeRole method directly
-// instead.
+// See AssumeRole for more information on using the AssumeRole
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the AssumeRoleRequest method.
 //    req, resp := client.AssumeRoleRequest(params)
@@ -153,16 +152,16 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
 // API operation AssumeRole for usage and error information.
 //
 // Returned Error Codes:
-//   * MalformedPolicyDocument
+//   * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
 //   The request was rejected because the policy document was malformed. The error
 //   message describes the specific error.
 //
-//   * PackedPolicyTooLarge
+//   * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
 //   The request was rejected because the policy document was too large. The error
 //   message describes how big the policy document is, in packed form, as a percentage
 //   of what the API allows.
 //
-//   * RegionDisabledException
+//   * ErrCodeRegionDisabledException "RegionDisabledException"
 //   STS is not activated in the requested region for the account that is being
 //   asked to generate credentials. The account administrator must use the IAM
 //   console to activate STS in that region. For more information, see Activating
@@ -172,27 +171,41 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
 func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) {
 	req, out := c.AssumeRoleRequest(input)
-	err := req.Send()
-	return out, err
+	return out, req.Send()
+}
+
+// AssumeRoleWithContext is the same as AssumeRole with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssumeRole for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
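A hedged sketch of the new context-aware call from the caller's side; the role ARN and session name are placeholders, and `RoleArn`/`RoleSessionName` are the standard AssumeRoleInput fields, which this hunk does not show:

```go
import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

// assumeRole cancels the STS call if it takes longer than 10 seconds.
func assumeRole(roleARN string) (*sts.AssumeRoleOutput, error) {
	sess := session.Must(session.NewSession())
	svc := sts.New(sess)

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	return svc.AssumeRoleWithContext(ctx, &sts.AssumeRoleInput{
		RoleArn:         aws.String(roleARN),
		RoleSessionName: aws.String("example-session"), // placeholder
	})
}
```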
+func (c *STS) AssumeRoleWithContext(ctx aws.Context, input *AssumeRoleInput, opts ...request.Option) (*AssumeRoleOutput, error) { + req, out := c.AssumeRoleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opAssumeRoleWithSAML = "AssumeRoleWithSAML" // AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the // client's request for the AssumeRoleWithSAML operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See AssumeRoleWithSAML for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the AssumeRoleWithSAML method directly -// instead. +// See AssumeRoleWithSAML for more information on using the AssumeRoleWithSAML +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the AssumeRoleWithSAMLRequest method. // req, resp := client.AssumeRoleWithSAMLRequest(params) @@ -297,31 +310,31 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // API operation AssumeRoleWithSAML for usage and error information. // // Returned Error Codes: -// * MalformedPolicyDocument +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" // The request was rejected because the policy document was malformed. The error // message describes the specific error. // -// * PackedPolicyTooLarge +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" // The request was rejected because the policy document was too large. The error // message describes how big the policy document is, in packed form, as a percentage // of what the API allows. // -// * IDPRejectedClaim +// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" // The identity provider (IdP) reported that authentication failed. This might // be because the claim is invalid. // // If this error is returned for the AssumeRoleWithWebIdentity operation, it // can also mean that the claim has expired or has been explicitly revoked. // -// * InvalidIdentityToken +// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" // The web identity token that was passed could not be validated by AWS. Get // a new identity token from the identity provider and then retry the request. // -// * ExpiredTokenException +// * ErrCodeExpiredTokenException "ExpiredTokenException" // The web identity token that was passed is expired or is not valid. Get a // new identity token from the identity provider and then retry the request. // -// * RegionDisabledException +// * ErrCodeRegionDisabledException "RegionDisabledException" // STS is not activated in the requested region for the account that is being // asked to generate credentials. 
The account administrator must use the IAM // console to activate STS in that region. For more information, see Activating @@ -331,27 +344,41 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) { req, out := c.AssumeRoleWithSAMLRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// AssumeRoleWithSAMLWithContext is the same as AssumeRoleWithSAML with the addition of +// the ability to pass a context and additional request options. +// +// See AssumeRoleWithSAML for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) AssumeRoleWithSAMLWithContext(ctx aws.Context, input *AssumeRoleWithSAMLInput, opts ...request.Option) (*AssumeRoleWithSAMLOutput, error) { + req, out := c.AssumeRoleWithSAMLRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" // AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the // client's request for the AssumeRoleWithWebIdentity operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See AssumeRoleWithWebIdentity for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the AssumeRoleWithWebIdentity method directly -// instead. +// See AssumeRoleWithWebIdentity for more information on using the AssumeRoleWithWebIdentity +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the AssumeRoleWithWebIdentityRequest method. // req, resp := client.AssumeRoleWithWebIdentityRequest(params) @@ -478,38 +505,38 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // API operation AssumeRoleWithWebIdentity for usage and error information. // // Returned Error Codes: -// * MalformedPolicyDocument +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" // The request was rejected because the policy document was malformed. The error // message describes the specific error. // -// * PackedPolicyTooLarge +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" // The request was rejected because the policy document was too large. 
The error // message describes how big the policy document is, in packed form, as a percentage // of what the API allows. // -// * IDPRejectedClaim +// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" // The identity provider (IdP) reported that authentication failed. This might // be because the claim is invalid. // // If this error is returned for the AssumeRoleWithWebIdentity operation, it // can also mean that the claim has expired or has been explicitly revoked. // -// * IDPCommunicationError +// * ErrCodeIDPCommunicationErrorException "IDPCommunicationError" // The request could not be fulfilled because the non-AWS identity provider // (IDP) that was asked to verify the incoming identity token could not be reached. // This is often a transient error caused by network conditions. Retry the request // a limited number of times so that you don't exceed the request rate. If the // error persists, the non-AWS identity provider might be down or not responding. // -// * InvalidIdentityToken +// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" // The web identity token that was passed could not be validated by AWS. Get // a new identity token from the identity provider and then retry the request. // -// * ExpiredTokenException +// * ErrCodeExpiredTokenException "ExpiredTokenException" // The web identity token that was passed is expired or is not valid. Get a // new identity token from the identity provider and then retry the request. // -// * RegionDisabledException +// * ErrCodeRegionDisabledException "RegionDisabledException" // STS is not activated in the requested region for the account that is being // asked to generate credentials. The account administrator must use the IAM // console to activate STS in that region. For more information, see Activating @@ -519,27 +546,41 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) { req, out := c.AssumeRoleWithWebIdentityRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// AssumeRoleWithWebIdentityWithContext is the same as AssumeRoleWithWebIdentity with the addition of +// the ability to pass a context and additional request options. +// +// See AssumeRoleWithWebIdentity for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) AssumeRoleWithWebIdentityWithContext(ctx aws.Context, input *AssumeRoleWithWebIdentityInput, opts ...request.Option) (*AssumeRoleWithWebIdentityOutput, error) { + req, out := c.AssumeRoleWithWebIdentityRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage" // DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the // client's request for the DecodeAuthorizationMessage operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. 
// -// See DecodeAuthorizationMessage for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DecodeAuthorizationMessage method directly -// instead. +// See DecodeAuthorizationMessage for more information on using the DecodeAuthorizationMessage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DecodeAuthorizationMessageRequest method. // req, resp := client.DecodeAuthorizationMessageRequest(params) @@ -609,7 +650,7 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag // API operation DecodeAuthorizationMessage for usage and error information. // // Returned Error Codes: -// * InvalidAuthorizationMessageException +// * ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException" // The error returned if the message passed to DecodeAuthorizationMessage was // invalid. This can happen if the token contains invalid characters, such as // linebreaks. @@ -617,27 +658,41 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) { req, out := c.DecodeAuthorizationMessageRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DecodeAuthorizationMessageWithContext is the same as DecodeAuthorizationMessage with the addition of +// the ability to pass a context and additional request options. +// +// See DecodeAuthorizationMessage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) DecodeAuthorizationMessageWithContext(ctx aws.Context, input *DecodeAuthorizationMessageInput, opts ...request.Option) (*DecodeAuthorizationMessageOutput, error) { + req, out := c.DecodeAuthorizationMessageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetCallerIdentity = "GetCallerIdentity" // GetCallerIdentityRequest generates a "aws/request.Request" representing the // client's request for the GetCallerIdentity operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See GetCallerIdentity for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetCallerIdentity method directly -// instead. +// See GetCallerIdentity for more information on using the GetCallerIdentity +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetCallerIdentityRequest method. // req, resp := client.GetCallerIdentityRequest(params) @@ -678,27 +733,41 @@ func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *requ // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) { req, out := c.GetCallerIdentityRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetCallerIdentityWithContext is the same as GetCallerIdentity with the addition of +// the ability to pass a context and additional request options. +// +// See GetCallerIdentity for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) GetCallerIdentityWithContext(ctx aws.Context, input *GetCallerIdentityInput, opts ...request.Option) (*GetCallerIdentityOutput, error) { + req, out := c.GetCallerIdentityRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetFederationToken = "GetFederationToken" // GetFederationTokenRequest generates a "aws/request.Request" representing the // client's request for the GetFederationToken operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See GetFederationToken for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetFederationToken method directly -// instead. +// See GetFederationToken for more information on using the GetFederationToken +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. 
// // // Example sending a request using the GetFederationTokenRequest method. // req, resp := client.GetFederationTokenRequest(params) @@ -814,16 +883,16 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // API operation GetFederationToken for usage and error information. // // Returned Error Codes: -// * MalformedPolicyDocument +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" // The request was rejected because the policy document was malformed. The error // message describes the specific error. // -// * PackedPolicyTooLarge +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" // The request was rejected because the policy document was too large. The error // message describes how big the policy document is, in packed form, as a percentage // of what the API allows. // -// * RegionDisabledException +// * ErrCodeRegionDisabledException "RegionDisabledException" // STS is not activated in the requested region for the account that is being // asked to generate credentials. The account administrator must use the IAM // console to activate STS in that region. For more information, see Activating @@ -833,27 +902,41 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) { req, out := c.GetFederationTokenRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetFederationTokenWithContext is the same as GetFederationToken with the addition of +// the ability to pass a context and additional request options. +// +// See GetFederationToken for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) GetFederationTokenWithContext(ctx aws.Context, input *GetFederationTokenInput, opts ...request.Option) (*GetFederationTokenOutput, error) { + req, out := c.GetFederationTokenRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetSessionToken = "GetSessionToken" // GetSessionTokenRequest generates a "aws/request.Request" representing the // client's request for the GetSessionToken operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See GetSessionToken for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetSessionToken method directly -// instead. +// See GetSessionToken for more information on using the GetSessionToken +// API call, and error handling. 
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
 //
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
 //
 //    // Example sending a request using the GetSessionTokenRequest method.
 //    req, resp := client.GetSessionTokenRequest(params)
@@ -937,7 +1020,7 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.
 // API operation GetSessionToken for usage and error information.
 //
 // Returned Error Codes:
-//   * RegionDisabledException
+//   * ErrCodeRegionDisabledException "RegionDisabledException"
 //   STS is not activated in the requested region for the account that is being
 //   asked to generate credentials. The account administrator must use the IAM
 //   console to activate STS in that region. For more information, see Activating
@@ -947,8 +1030,23 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
 func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) {
 	req, out := c.GetSessionTokenRequest(input)
-	err := req.Send()
-	return out, err
+	return out, req.Send()
+}
+
+// GetSessionTokenWithContext is the same as GetSessionToken with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetSessionToken for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionTokenInput, opts ...request.Option) (*GetSessionTokenOutput, error) {
+	req, out := c.GetSessionTokenRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
 }
 
 // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleRequest
@@ -980,7 +1078,7 @@ type AssumeRoleInput struct {
 	//
 	// The regex used to validated this parameter is a string of characters consisting
 	// of upper- and lower-case alphanumeric characters with no spaces. You can
-	// also include underscores or any of the following characters: =,.@:\/-
+	// also include underscores or any of the following characters: =,.@:/-
 	ExternalId *string `min:"2" type:"string"`
 
 	// An IAM policy in JSON format.
@@ -2164,9 +2262,9 @@ type GetSessionTokenInput struct {
 	// You can find the device for an IAM user by going to the AWS Management Console
 	// and viewing the user's security credentials.
 	//
-	// The regex used to validate this parameter is a string of characters consisting
+	// The regex used to validate this parameter is a string of characters consisting
 	// of upper- and lower-case alphanumeric characters with no spaces. You can
-	// also include underscores or any of the following characters: =,.@-
+	// also include underscores or any of the following characters: =,.@:/-
 	SerialNumber *string `min:"9" type:"string"`
 
 	// The value provided by the MFA device, if MFA is required.
If any policy requires diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go new file mode 100644 index 00000000..ef681ab0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go @@ -0,0 +1,72 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package sts provides the client and types for making API +// requests to AWS Security Token Service. +// +// The AWS Security Token Service (STS) is a web service that enables you to +// request temporary, limited-privilege credentials for AWS Identity and Access +// Management (IAM) users or for users that you authenticate (federated users). +// This guide provides descriptions of the STS API. For more detailed information +// about using this service, go to Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). +// +// As an alternative to using the API, you can use one of the AWS SDKs, which +// consist of libraries and sample code for various programming languages and +// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient +// way to create programmatic access to STS. For example, the SDKs take care +// of cryptographically signing requests, managing errors, and retrying requests +// automatically. For information about the AWS SDKs, including how to download +// and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/). +// +// For information about setting up signatures and authorization through the +// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) +// in the AWS General Reference. For general information about the Query API, +// go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) +// in Using IAM. For information about using security tokens with other AWS +// products, go to AWS Services That Work with IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html) +// in the IAM User Guide. +// +// If you're new to AWS and need additional technical information about a specific +// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/ +// (http://aws.amazon.com/documentation/). +// +// Endpoints +// +// The AWS Security Token Service (STS) has a default endpoint of https://sts.amazonaws.com +// that maps to the US East (N. Virginia) region. Additional regions are available +// and are activated by default. For more information, see Activating and Deactivating +// AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// For information about STS endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region) +// in the AWS General Reference. +// +// Recording API requests +// +// STS supports AWS CloudTrail, which is a service that records AWS calls for +// your AWS account and delivers log files to an Amazon S3 bucket. By using +// information collected by CloudTrail, you can determine what requests were +// successfully made to STS, who made the request, when it was made, and so +// on. 
To learn more about CloudTrail, including how to turn it on and find +// your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html). +// +// See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service. +// +// See sts package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/ +// +// Using the Client +// +// To contact AWS Security Token Service with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the AWS Security Token Service client STS for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#New +package sts diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go new file mode 100644 index 00000000..e24884ef --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go @@ -0,0 +1,73 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sts + +const ( + + // ErrCodeExpiredTokenException for service response error code + // "ExpiredTokenException". + // + // The web identity token that was passed is expired or is not valid. Get a + // new identity token from the identity provider and then retry the request. + ErrCodeExpiredTokenException = "ExpiredTokenException" + + // ErrCodeIDPCommunicationErrorException for service response error code + // "IDPCommunicationError". + // + // The request could not be fulfilled because the non-AWS identity provider + // (IDP) that was asked to verify the incoming identity token could not be reached. + // This is often a transient error caused by network conditions. Retry the request + // a limited number of times so that you don't exceed the request rate. If the + // error persists, the non-AWS identity provider might be down or not responding. + ErrCodeIDPCommunicationErrorException = "IDPCommunicationError" + + // ErrCodeIDPRejectedClaimException for service response error code + // "IDPRejectedClaim". + // + // The identity provider (IdP) reported that authentication failed. This might + // be because the claim is invalid. + // + // If this error is returned for the AssumeRoleWithWebIdentity operation, it + // can also mean that the claim has expired or has been explicitly revoked. + ErrCodeIDPRejectedClaimException = "IDPRejectedClaim" + + // ErrCodeInvalidAuthorizationMessageException for service response error code + // "InvalidAuthorizationMessageException". + // + // The error returned if the message passed to DecodeAuthorizationMessage was + // invalid. This can happen if the token contains invalid characters, such as + // linebreaks. + ErrCodeInvalidAuthorizationMessageException = "InvalidAuthorizationMessageException" + + // ErrCodeInvalidIdentityTokenException for service response error code + // "InvalidIdentityToken". + // + // The web identity token that was passed could not be validated by AWS. Get + // a new identity token from the identity provider and then retry the request. 
+ ErrCodeInvalidIdentityTokenException = "InvalidIdentityToken" + + // ErrCodeMalformedPolicyDocumentException for service response error code + // "MalformedPolicyDocument". + // + // The request was rejected because the policy document was malformed. The error + // message describes the specific error. + ErrCodeMalformedPolicyDocumentException = "MalformedPolicyDocument" + + // ErrCodePackedPolicyTooLargeException for service response error code + // "PackedPolicyTooLarge". + // + // The request was rejected because the policy document was too large. The error + // message describes how big the policy document is, in packed form, as a percentage + // of what the API allows. + ErrCodePackedPolicyTooLargeException = "PackedPolicyTooLarge" + + // ErrCodeRegionDisabledException for service response error code + // "RegionDisabledException". + // + // STS is not activated in the requested region for the account that is being + // asked to generate credentials. The account administrator must use the IAM + // console to activate STS in that region. For more information, see Activating + // and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) + // in the IAM User Guide. + ErrCodeRegionDisabledException = "RegionDisabledException" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go index 9c4bfb83..1ee5839e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go @@ -1,4 +1,4 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. package sts @@ -11,54 +11,12 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/query" ) -// The AWS Security Token Service (STS) is a web service that enables you to -// request temporary, limited-privilege credentials for AWS Identity and Access -// Management (IAM) users or for users that you authenticate (federated users). -// This guide provides descriptions of the STS API. For more detailed information -// about using this service, go to Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). +// STS provides the API operation methods for making requests to +// AWS Security Token Service. See this package's package overview docs +// for details on the service. // -// As an alternative to using the API, you can use one of the AWS SDKs, which -// consist of libraries and sample code for various programming languages and -// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient -// way to create programmatic access to STS. For example, the SDKs take care -// of cryptographically signing requests, managing errors, and retrying requests -// automatically. For information about the AWS SDKs, including how to download -// and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/). -// -// For information about setting up signatures and authorization through the -// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) -// in the AWS General Reference. For general information about the Query API, -// go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) -// in Using IAM. 
For information about using security tokens with other AWS
-// products, go to AWS Services That Work with IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html)
-// in the IAM User Guide.
-//
-// If you're new to AWS and need additional technical information about a specific
-// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/
-// (http://aws.amazon.com/documentation/).
-//
-// Endpoints
-//
-// The AWS Security Token Service (STS) has a default endpoint of https://sts.amazonaws.com
-// that maps to the US East (N. Virginia) region. Additional regions are available
-// and are activated by default. For more information, see Activating and Deactivating
-// AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
-// in the IAM User Guide.
-//
-// For information about STS endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region)
-// in the AWS General Reference.
-//
-// Recording API requests
-//
-// STS supports AWS CloudTrail, which is a service that records AWS calls for
-// your AWS account and delivers log files to an Amazon S3 bucket. By using
-// information collected by CloudTrail, you can determine what requests were
-// successfully made to STS, who made the request, when it was made, and so
-// on. To learn more about CloudTrail, including how to turn it on and find
-// your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html).
-// The service client's operations are safe to be used concurrently.
-// It is not safe to mutate any of the client's properties though.
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15
+// STS methods are safe to use concurrently. It is not safe to
+// modify any of the struct's properties though.
 type STS struct {
 	*client.Client
 }

From 32ac4679925ccfe7e191e170d77dacfd88671464 Mon Sep 17 00:00:00 2001
From: Sargun Dhillon
Date: Wed, 29 Nov 2017 11:17:39 -0800
Subject: [PATCH 078/276] Introduce Walk Method Per Storage Driver

Move the Walk types into registry/storage/driver, and add a Walk method to
each storage driver. Although this is yet another API to implement, there is
a fallback implementation that relies on List and Stat; for some filesystems
this fallback is very slow. Also, this Walk method conforms better to a
traditional WalkDir (a la filepath).

This change is in preparation for refactoring.
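
For illustration, a minimal caller of the new interface might look like the
sketch below. It is not part of this diff: the inmemory driver and the paths
are arbitrary stand-ins for any registered StorageDriver, and the ErrSkipDir
pruning mirrors filepath.Walk's SkipDir behaviour.

    package main

    import (
        "context"
        "fmt"
        "strings"

        storagedriver "github.com/docker/distribution/registry/storage/driver"
        "github.com/docker/distribution/registry/storage/driver/inmemory"
    )

    func main() {
        ctx := context.Background()
        d := inmemory.New() // any StorageDriver; most delegate to WalkFallback

        // Illustrative content so the walk has something to visit.
        _ = d.PutContent(ctx, "/repo/_uploads/tmp", []byte("x"))
        _ = d.PutContent(ctx, "/repo/layers/data", []byte("y"))

        err := d.Walk(ctx, "/repo", func(fi storagedriver.FileInfo) error {
            // Returning ErrSkipDir for a directory prunes that subtree.
            if fi.IsDir() && strings.HasSuffix(fi.Path(), "/_uploads") {
                return storagedriver.ErrSkipDir
            }
            fmt.Println(fi.Path(), fi.IsDir())
            return nil
        })
        if err != nil {
            fmt.Println("walk failed:", err)
        }
    }
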
Signed-off-by: Sargun Dhillon --- registry/storage/catalog.go | 4 +- registry/storage/driver/azure/azure.go | 6 +++ registry/storage/driver/base/base.go | 12 +++++ registry/storage/driver/filesystem/driver.go | 6 +++ registry/storage/driver/gcs/gcs.go | 6 +++ registry/storage/driver/inmemory/driver.go | 6 +++ registry/storage/driver/oss/oss.go | 6 +++ registry/storage/driver/s3-aws/s3.go | 6 +++ registry/storage/driver/s3-goamz/s3.go | 6 +++ registry/storage/driver/storagedriver.go | 7 +++ registry/storage/driver/swift/swift.go | 6 +++ registry/storage/driver/walk.go | 52 ++++++++++++++++++++ registry/storage/purgeuploads.go | 2 +- registry/storage/walk.go | 20 +++----- registry/storage/walk_test.go | 2 +- 15 files changed, 129 insertions(+), 18 deletions(-) create mode 100644 registry/storage/driver/walk.go diff --git a/registry/storage/catalog.go b/registry/storage/catalog.go index f3c6fe9e..21950980 100644 --- a/registry/storage/catalog.go +++ b/registry/storage/catalog.go @@ -144,9 +144,9 @@ func handleRepository(fileInfo driver.FileInfo, root, last string, fn func(repoP return err } } - return ErrSkipDir + return driver.ErrSkipDir } else if strings.HasPrefix(file, "_") { - return ErrSkipDir + return driver.ErrSkipDir } return nil diff --git a/registry/storage/driver/azure/azure.go b/registry/storage/driver/azure/azure.go index a37a22bd..728d8a85 100644 --- a/registry/storage/driver/azure/azure.go +++ b/registry/storage/driver/azure/azure.go @@ -336,6 +336,12 @@ func (d *driver) URLFor(ctx context.Context, path string, options map[string]int return d.client.GetBlobSASURI(d.container, path, expiresTime, "r") } +// Walk traverses a filesystem defined within driver, starting +// from the given path, calling f on each file +func (d *driver) Walk(ctx context.Context, path string, f storagedriver.WalkFn) error { + return storagedriver.WalkFallback(ctx, d, path, f) +} + // directDescendants will find direct descendants (blobs or virtual containers) // of from list of blob paths and will return their full paths. Elements in blobs // list must be prefixed with a "/" and diff --git a/registry/storage/driver/base/base.go b/registry/storage/driver/base/base.go index 1778c6c5..149cfcb2 100644 --- a/registry/storage/driver/base/base.go +++ b/registry/storage/driver/base/base.go @@ -197,3 +197,15 @@ func (base *Base) URLFor(ctx context.Context, path string, options map[string]in str, e := base.StorageDriver.URLFor(ctx, path, options) return str, base.setDriverName(e) } + +// Walk wraps Walk of underlying storage driver. 
+func (base *Base) Walk(ctx context.Context, path string, f storagedriver.WalkFn) error { + ctx, done := dcontext.WithTrace(ctx) + defer done("%s.Walk(%q)", base.Name(), path) + + if !storagedriver.PathRegexp.MatchString(path) { + return storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} + } + + return base.setDriverName(base.StorageDriver.Walk(ctx, path, f)) +} diff --git a/registry/storage/driver/filesystem/driver.go b/registry/storage/driver/filesystem/driver.go index 71c75fba..9a9c062a 100644 --- a/registry/storage/driver/filesystem/driver.go +++ b/registry/storage/driver/filesystem/driver.go @@ -315,6 +315,12 @@ func (d *driver) URLFor(ctx context.Context, path string, options map[string]int return "", storagedriver.ErrUnsupportedMethod{} } +// Walk traverses a filesystem defined within driver, starting +// from the given path, calling f on each file +func (d *driver) Walk(ctx context.Context, path string, f storagedriver.WalkFn) error { + return storagedriver.WalkFallback(ctx, d, path, f) +} + // fullPath returns the absolute path of a key within the Driver's storage. func (d *driver) fullPath(subPath string) string { return path.Join(d.rootDirectory, subPath) diff --git a/registry/storage/driver/gcs/gcs.go b/registry/storage/driver/gcs/gcs.go index dadae79c..d129bee7 100644 --- a/registry/storage/driver/gcs/gcs.go +++ b/registry/storage/driver/gcs/gcs.go @@ -779,6 +779,12 @@ func (d *driver) URLFor(context context.Context, path string, options map[string return storage.SignedURL(d.bucket, name, opts) } +// Walk traverses a filesystem defined within driver, starting +// from the given path, calling f on each file +func (d *driver) Walk(ctx context.Context, path string, f storagedriver.WalkFn) error { + return storagedriver.WalkFallback(ctx, d, path, f) +} + func startSession(client *http.Client, bucket string, name string) (uri string, err error) { u := &url.URL{ Scheme: "https", diff --git a/registry/storage/driver/inmemory/driver.go b/registry/storage/driver/inmemory/driver.go index 14bc3940..e18e2933 100644 --- a/registry/storage/driver/inmemory/driver.go +++ b/registry/storage/driver/inmemory/driver.go @@ -240,6 +240,12 @@ func (d *driver) URLFor(ctx context.Context, path string, options map[string]int return "", storagedriver.ErrUnsupportedMethod{} } +// Walk traverses a filesystem defined within driver, starting +// from the given path, calling f on each file +func (d *driver) Walk(ctx context.Context, path string, f storagedriver.WalkFn) error { + return storagedriver.WalkFallback(ctx, d, path, f) +} + type writer struct { d *driver f *file diff --git a/registry/storage/driver/oss/oss.go b/registry/storage/driver/oss/oss.go index f79e3537..1dcf42b8 100644 --- a/registry/storage/driver/oss/oss.go +++ b/registry/storage/driver/oss/oss.go @@ -479,6 +479,12 @@ func (d *driver) URLFor(ctx context.Context, path string, options map[string]int return signedURL, nil } +// Walk traverses a filesystem defined within driver, starting +// from the given path, calling f on each file +func (d *driver) Walk(ctx context.Context, path string, f storagedriver.WalkFn) error { + return storagedriver.WalkFallback(ctx, d, path, f) +} + func (d *driver) ossPath(path string) string { return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/") } diff --git a/registry/storage/driver/s3-aws/s3.go b/registry/storage/driver/s3-aws/s3.go index 33312afc..f9a60653 100644 --- a/registry/storage/driver/s3-aws/s3.go +++ b/registry/storage/driver/s3-aws/s3.go @@ 
-874,6 +874,12 @@ func (d *driver) URLFor(ctx context.Context, path string, options map[string]int
 	return req.Presign(expiresIn)
 }
 
+// Walk traverses a filesystem defined within driver, starting
+// from the given path, calling f on each file
+func (d *driver) Walk(ctx context.Context, path string, f storagedriver.WalkFn) error {
+	return storagedriver.WalkFallback(ctx, d, path, f)
+}
+
 func (d *driver) s3Path(path string) string {
 	return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/")
 }
diff --git a/registry/storage/driver/s3-goamz/s3.go b/registry/storage/driver/s3-goamz/s3.go
index 4850f6d7..4bee38d7 100644
--- a/registry/storage/driver/s3-goamz/s3.go
+++ b/registry/storage/driver/s3-goamz/s3.go
@@ -546,6 +546,12 @@ func (d *Driver) S3BucketKey(path string) string {
 	return d.StorageDriver.(*driver).s3Path(path)
 }
 
+// Walk traverses a filesystem defined within driver, starting
+// from the given path, calling f on each file
+func (d *driver) Walk(ctx context.Context, path string, f storagedriver.WalkFn) error {
+	return storagedriver.WalkFallback(ctx, d, path, f)
+}
+
 func parseError(path string, err error) error {
 	if s3Err, ok := err.(*s3.Error); ok && s3Err.Code == "NoSuchKey" {
 		return storagedriver.PathNotFoundError{Path: path}
diff --git a/registry/storage/driver/storagedriver.go b/registry/storage/driver/storagedriver.go
index 4b570dd6..b220713f 100644
--- a/registry/storage/driver/storagedriver.go
+++ b/registry/storage/driver/storagedriver.go
@@ -83,6 +83,13 @@ type StorageDriver interface {
 	// May return an ErrUnsupportedMethod in certain StorageDriver
 	// implementations.
 	URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error)
+
+	// Walk traverses a filesystem defined within driver, starting
+	// from the given path, calling f on each file.
+	// If the returned error from the WalkFn is ErrSkipDir and fileInfo refers
+	// to a directory, the directory will not be entered and Walk
+	// will continue the traversal. If fileInfo refers to a normal file, processing stops.
+	Walk(ctx context.Context, path string, f WalkFn) error
 }
 
 // FileWriter provides an abstraction for an opened writable file-like object in
diff --git a/registry/storage/driver/swift/swift.go b/registry/storage/driver/swift/swift.go
index 11a33264..5dbb5741 100644
--- a/registry/storage/driver/swift/swift.go
+++ b/registry/storage/driver/swift/swift.go
@@ -644,6 +644,12 @@ func (d *driver) URLFor(ctx context.Context, path string, options map[string]int
 	return tempURL, nil
 }
 
+// Walk traverses a filesystem defined within driver, starting
+// from the given path, calling f on each file
+func (d *driver) Walk(ctx context.Context, path string, f storagedriver.WalkFn) error {
+	return storagedriver.WalkFallback(ctx, d, path, f)
+}
+
 func (d *driver) swiftPath(path string) string {
 	return strings.TrimLeft(strings.TrimRight(d.Prefix+"/files"+path, "/"), "/")
 }
diff --git a/registry/storage/driver/walk.go b/registry/storage/driver/walk.go
new file mode 100644
index 00000000..2a5bc51f
--- /dev/null
+++ b/registry/storage/driver/walk.go
@@ -0,0 +1,52 @@
+package driver
+
+import (
+	"context"
+	"errors"
+	"sort"
+)
+
+// ErrSkipDir is used as a return value from a WalkFn to indicate that
+// the directory named in the call is to be skipped. It is not returned
+// as an error by any function.
+var ErrSkipDir = errors.New("skip this directory") + +// WalkFn is called once per file by Walk +type WalkFn func(fileInfo FileInfo) error + +// WalkFallback traverses a filesystem defined within driver, starting +// from the given path, calling f on each file. It uses the List method and Stat to drive itself. +// If the returned error from the WalkFn is ErrSkipDir and fileInfo refers +// to a directory, the directory will not be entered and Walk +// will continue the traversal. If fileInfo refers to a normal file, processing stops +func WalkFallback(ctx context.Context, driver StorageDriver, from string, f WalkFn) error { + children, err := driver.List(ctx, from) + if err != nil { + return err + } + sort.Stable(sort.StringSlice(children)) + for _, child := range children { + // TODO(stevvooe): Calling driver.Stat for every entry is quite + // expensive when running against backends with a slow Stat + // implementation, such as s3. This is very likely a serious + // performance bottleneck. + fileInfo, err := driver.Stat(ctx, child) + if err != nil { + return err + } + err = f(fileInfo) + if err == nil && fileInfo.IsDir() { + if err := WalkFallback(ctx, driver, child, f); err != nil { + return err + } + } else if err == ErrSkipDir { + // Stop iteration if it's a file, otherwise noop if it's a directory + if !fileInfo.IsDir() { + return nil + } + } else if err != nil { + return err + } + } + return nil +} diff --git a/registry/storage/purgeuploads.go b/registry/storage/purgeuploads.go index 2396e803..4292bf7a 100644 --- a/registry/storage/purgeuploads.go +++ b/registry/storage/purgeuploads.go @@ -75,7 +75,7 @@ func getOutstandingUploads(ctx context.Context, driver storageDriver.StorageDriv inUploadDir = (file == "_uploads") if fileInfo.IsDir() && !inUploadDir { - return ErrSkipDir + return storageDriver.ErrSkipDir } } diff --git a/registry/storage/walk.go b/registry/storage/walk.go index 3322d16a..add3ec46 100644 --- a/registry/storage/walk.go +++ b/registry/storage/walk.go @@ -2,27 +2,19 @@ package storage import ( "context" - "errors" "fmt" "sort" storageDriver "github.com/docker/distribution/registry/storage/driver" ) -// ErrSkipDir is used as a return value from onFileFunc to indicate that -// the directory named in the call is to be skipped. It is not returned -// as an error by any function. -var ErrSkipDir = errors.New("skip this directory") - -// WalkFn is called once per file by Walk -// If the returned error is ErrSkipDir and fileInfo refers -// to a directory, the directory will not be entered and Walk -// will continue the traversal. Otherwise Walk will return -type WalkFn func(fileInfo storageDriver.FileInfo) error - // Walk traverses a filesystem defined within driver, starting // from the given path, calling f on each file -func Walk(ctx context.Context, driver storageDriver.StorageDriver, from string, f WalkFn) error { +// If the returned error from the WalkFn is ErrSkipDir and fileInfo refers +// to a directory, the directory will not be entered and Walk +// will continue the traversal. 
Otherwise Walk will return
+// the error.
+func Walk(ctx context.Context, driver storageDriver.StorageDriver, from string, f storageDriver.WalkFn) error {
 	children, err := driver.List(ctx, from)
 	if err != nil {
 		return err
@@ -38,7 +30,7 @@ func Walk(ctx context.Context, driver storageDriver.StorageDriver, from string,
 		return err
 	}
 	err = f(fileInfo)
-	skipDir := (err == ErrSkipDir)
+	skipDir := (err == storageDriver.ErrSkipDir)
 	if err != nil && !skipDir {
 		return err
 	}
diff --git a/registry/storage/walk_test.go b/registry/storage/walk_test.go
index ca9caae0..b0e7f4bc 100644
--- a/registry/storage/walk_test.go
+++ b/registry/storage/walk_test.go
@@ -131,7 +131,7 @@ func TestWalkSkipDir(t *testing.T) {
 		filePath := fileInfo.Path()
 		if filePath == "/a/b" {
 			// skip processing /a/b/c and /a/b/c/d
-			return ErrSkipDir
+			return driver.ErrSkipDir
 		}
 		delete(expected, filePath)
 		return nil

From 35b29a609ed0bf97f7cc5e101b27bc53fcf44f9e Mon Sep 17 00:00:00 2001
From: Sargun Dhillon
Date: Wed, 29 Nov 2017 12:26:28 -0800
Subject: [PATCH 079/276] Use the New Driver Walk method for catalog enumeration

This changes the Walk method used for catalog enumeration. Just to show how
much of an effect this has on our s3 storage:

Original:
List calls: 6839
real	3m16.636s
user	0m0.000s
sys	0m0.016s

New:
ListObjectsV2 Calls: 1805
real	0m49.970s
user	0m0.008s
sys	0m0.000s

This is because it no longer performs a list and stat per item, and instead
is able to use the metadata gained from the list as a replacement for stat.

Signed-off-by: Sargun Dhillon
---
 registry/storage/catalog.go          |  19 ++--
 registry/storage/driver/s3-aws/s3.go | 129 ++++++++++++++++++++++++++-
 2 files changed, 136 insertions(+), 12 deletions(-)

diff --git a/registry/storage/catalog.go b/registry/storage/catalog.go
index 21950980..4d4149ad 100644
--- a/registry/storage/catalog.go
+++ b/registry/storage/catalog.go
@@ -18,6 +18,7 @@ var errFinishedWalk = errors.New("finished walk")
 // Because it's a quite expensive operation, it should only be used when building up
 // an initial set of repositories.
 func (reg *registry) Repositories(ctx context.Context, repos []string, last string) (n int, err error) {
+	var finishedWalk bool
 	var foundRepos []string
 
 	if len(repos) == 0 {
@@ -29,7 +30,7 @@ func (reg *registry) Repositories(ctx context.Context, repos []string, last stri
 		return 0, err
 	}
 
-	err = Walk(ctx, reg.blobStore.driver, root, func(fileInfo driver.FileInfo) error {
+	err = reg.blobStore.driver.Walk(ctx, root, func(fileInfo driver.FileInfo) error {
 		err := handleRepository(fileInfo, root, last, func(repoPath string) error {
 			foundRepos = append(foundRepos, repoPath)
 			return nil
@@ -40,7 +41,8 @@ func (reg *registry) Repositories(ctx context.Context, repos []string, last stri
 
 		// if we've filled our array, no need to walk any further
 		if len(foundRepos) == len(repos) {
-			return errFinishedWalk
+			finishedWalk = true
+			return driver.ErrSkipDir
 		}
 
 		return nil
@@ -48,14 +50,11 @@ func (reg *registry) Repositories(ctx context.Context, repos []string, last stri
 
 	n = copy(repos, foundRepos)
 
-	switch err {
-	case nil:
-		// nil means that we completed walk and didn't fill buffer. No more
-		// records are available.
-		err = io.EOF
-	case errFinishedWalk:
-		// more records are available.
-		err = nil
+	if err != nil {
+		return n, err
+	} else if !finishedWalk {
+		// We didn't fill the buffer. No more records are available.
+ return n, io.EOF } return n, err diff --git a/registry/storage/driver/s3-aws/s3.go b/registry/storage/driver/s3-aws/s3.go index f9a60653..0c6f7e5e 100644 --- a/registry/storage/driver/s3-aws/s3.go +++ b/registry/storage/driver/s3-aws/s3.go @@ -34,6 +34,7 @@ import ( "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" + dcontext "github.com/docker/distribution/context" "github.com/docker/distribution/registry/client/transport" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/base" @@ -876,8 +877,132 @@ func (d *driver) URLFor(ctx context.Context, path string, options map[string]int // Walk traverses a filesystem defined within driver, starting // from the given path, calling f on each file -func (d *driver) Walk(ctx context.Context, path string, f storagedriver.WalkFn) error { - return storagedriver.WalkFallback(ctx, d, path, f) +func (d *driver) Walk(ctx context.Context, from string, f storagedriver.WalkFn) error { + path := from + if !strings.HasSuffix(path, "/") { + path = path + "/" + } + + prefix := "" + if d.s3Path("") == "" { + prefix = "/" + } + + var objectCount int64 + if err := d.doWalk(ctx, &objectCount, d.s3Path(path), prefix, f); err != nil { + return err + } + + // S3 doesn't have the concept of empty directories, so it'll return path not found if there are no objects + if objectCount == 0 { + return storagedriver.PathNotFoundError{Path: from} + } + + return nil +} + +type walkInfoContainer struct { + storagedriver.FileInfoFields + prefix *string +} + +// Path provides the full path of the target of this file info. +func (wi walkInfoContainer) Path() string { + return wi.FileInfoFields.Path +} + +// Size returns current length in bytes of the file. The return value can +// be used to write to the end of the file at path. The value is +// meaningless if IsDir returns true. +func (wi walkInfoContainer) Size() int64 { + return wi.FileInfoFields.Size +} + +// ModTime returns the modification time for the file. For backends that +// don't have a modification time, the creation time should be returned. +func (wi walkInfoContainer) ModTime() time.Time { + return wi.FileInfoFields.ModTime +} + +// IsDir returns true if the path is a directory. 
+func (wi walkInfoContainer) IsDir() bool { + return wi.FileInfoFields.IsDir +} + +func (d *driver) doWalk(parentCtx context.Context, objectCount *int64, path, prefix string, f storagedriver.WalkFn) error { + var retError error + + listObjectsInput := &s3.ListObjectsV2Input{ + Bucket: aws.String(d.Bucket), + Prefix: aws.String(path), + Delimiter: aws.String("/"), + MaxKeys: aws.Int64(listMax), + } + + ctx, done := dcontext.WithTrace(parentCtx) + defer done("s3aws.ListObjectsV2Pages(%s)", path) + listObjectErr := d.S3.ListObjectsV2PagesWithContext(ctx, listObjectsInput, func(objects *s3.ListObjectsV2Output, lastPage bool) bool { + + *objectCount += *objects.KeyCount + walkInfos := make([]walkInfoContainer, 0, *objects.KeyCount) + + for _, dir := range objects.CommonPrefixes { + commonPrefix := *dir.Prefix + walkInfos = append(walkInfos, walkInfoContainer{ + prefix: dir.Prefix, + FileInfoFields: storagedriver.FileInfoFields{ + IsDir: true, + Path: strings.Replace(commonPrefix[:len(commonPrefix)-1], d.s3Path(""), prefix, 1), + }, + }) + } + + for _, file := range objects.Contents { + walkInfos = append(walkInfos, walkInfoContainer{ + FileInfoFields: storagedriver.FileInfoFields{ + IsDir: false, + Size: *file.Size, + ModTime: *file.LastModified, + Path: strings.Replace(*file.Key, d.s3Path(""), prefix, 1), + }, + }) + } + + sort.SliceStable(walkInfos, func(i, j int) bool { return walkInfos[i].FileInfoFields.Path < walkInfos[j].FileInfoFields.Path }) + + for _, walkInfo := range walkInfos { + err := f(walkInfo) + + if err == storagedriver.ErrSkipDir { + if walkInfo.IsDir() { + continue + } else { + break + } + } else if err != nil { + retError = err + return false + } + + if walkInfo.IsDir() { + if err := d.doWalk(ctx, objectCount, *walkInfo.prefix, prefix, f); err != nil { + retError = err + return false + } + } + } + return true + }) + + if retError != nil { + return retError + } + + if listObjectErr != nil { + return listObjectErr + } + + return nil } func (d *driver) s3Path(path string) string { From 585cdeb57160081e54c83fbf22e1420926f5416d Mon Sep 17 00:00:00 2001 From: Stephen J Day Date: Wed, 17 Jan 2018 16:01:41 -0800 Subject: [PATCH 080/276] context, registry/handlers: remove net/context references Signed-off-by: Stephen J Day --- context/doc.go | 5 ++--- registry/handlers/context.go | 2 +- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/context/doc.go b/context/doc.go index 9b623074..0c631a9c 100644 --- a/context/doc.go +++ b/context/doc.go @@ -1,7 +1,6 @@ // Package context provides several utilities for working with -// golang.org/x/net/context in http requests. Primarily, the focus is on -// logging relevant request information but this package is not limited to -// that purpose. +// Go's context in http requests. Primarily, the focus is on logging relevant +// request information but this package is not limited to that purpose. 
// // The easiest way to get started is to get the background context: // diff --git a/registry/handlers/context.go b/registry/handlers/context.go index a775be7f..e5d26768 100644 --- a/registry/handlers/context.go +++ b/registry/handlers/context.go @@ -1,6 +1,7 @@ package handlers import ( + "context" "fmt" "net/http" @@ -10,7 +11,6 @@ import ( "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/auth" "github.com/opencontainers/go-digest" - "golang.org/x/net/context" ) // Context should contain the request specific context for use in across From c7b0da2622ca4eedae558e492b97223641b6fc34 Mon Sep 17 00:00:00 2001 From: Sargun Dhillon Date: Wed, 29 Nov 2017 15:49:19 -0800 Subject: [PATCH 081/276] Use the new walk method for catalog enumeration This change is primarily to make GC faster. Signed-off-by: Sargun Dhillon --- registry/storage/blobstore.go | 4 +--- registry/storage/catalog.go | 4 ---- registry/storage/linkedblobstore.go | 8 +------- 3 files changed, 2 insertions(+), 14 deletions(-) diff --git a/registry/storage/blobstore.go b/registry/storage/blobstore.go index bfa41937..9fdf5219 100644 --- a/registry/storage/blobstore.go +++ b/registry/storage/blobstore.go @@ -88,13 +88,12 @@ func (bs *blobStore) Put(ctx context.Context, mediaType string, p []byte) (distr } func (bs *blobStore) Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error { - specPath, err := pathFor(blobsPathSpec{}) if err != nil { return err } - err = Walk(ctx, bs.driver, specPath, func(fileInfo driver.FileInfo) error { + return bs.driver.Walk(ctx, specPath, func(fileInfo driver.FileInfo) error { // skip directories if fileInfo.IsDir() { return nil @@ -114,7 +113,6 @@ func (bs *blobStore) Enumerate(ctx context.Context, ingester func(dgst digest.Di return ingester(digest) }) - return err } // path returns the canonical path for the blob identified by digest. The blob diff --git a/registry/storage/catalog.go b/registry/storage/catalog.go index 4d4149ad..902f40c2 100644 --- a/registry/storage/catalog.go +++ b/registry/storage/catalog.go @@ -10,10 +10,6 @@ import ( "github.com/docker/distribution/registry/storage/driver" ) -// errFinishedWalk signals an early exit to the walk when the current query -// is satisfied. -var errFinishedWalk = errors.New("finished walk") - // Returns a list, or partial list, of repositories in the registry. // Because it's a quite expensive operation, it should only be used when building up // an initial set of repositories. diff --git a/registry/storage/linkedblobstore.go b/registry/storage/linkedblobstore.go index 329163ba..3fb1da26 100644 --- a/registry/storage/linkedblobstore.go +++ b/registry/storage/linkedblobstore.go @@ -237,7 +237,7 @@ func (lbs *linkedBlobStore) Enumerate(ctx context.Context, ingestor func(digest. if err != nil { return err } - err = Walk(ctx, lbs.blobStore.driver, rootPath, func(fileInfo driver.FileInfo) error { + return lbs.driver.Walk(ctx, rootPath, func(fileInfo driver.FileInfo) error { // exit early if directory... if fileInfo.IsDir() { return nil @@ -273,12 +273,6 @@ func (lbs *linkedBlobStore) Enumerate(ctx context.Context, ingestor func(digest. 
return nil }) - - if err != nil { - return err - } - - return nil } func (lbs *linkedBlobStore) mount(ctx context.Context, sourceRepo reference.Named, dgst digest.Digest, sourceStat *distribution.Descriptor) (distribution.Descriptor, error) { From cbcbcb02c5fef21c0915e42902287976a8452f63 Mon Sep 17 00:00:00 2001 From: Sargun Dhillon Date: Thu, 18 Jan 2018 12:10:20 -0800 Subject: [PATCH 082/276] Remove old walk function This removes the old global walk function, and changes all the code to use the per-driver walk functions. Signed-off-by: Sargun Dhillon --- registry/storage/catalog.go | 2 +- registry/storage/driver/base/base.go | 2 +- registry/storage/error.go | 9 ++ registry/storage/purgeuploads.go | 2 +- registry/storage/purgeuploads_test.go | 2 +- registry/storage/walk.go | 51 --------- registry/storage/walk_test.go | 152 -------------------------- 7 files changed, 13 insertions(+), 207 deletions(-) create mode 100644 registry/storage/error.go delete mode 100644 registry/storage/walk.go delete mode 100644 registry/storage/walk_test.go diff --git a/registry/storage/catalog.go b/registry/storage/catalog.go index 902f40c2..3c1a78de 100644 --- a/registry/storage/catalog.go +++ b/registry/storage/catalog.go @@ -63,7 +63,7 @@ func (reg *registry) Enumerate(ctx context.Context, ingester func(string) error) return err } - err = Walk(ctx, reg.blobStore.driver, root, func(fileInfo driver.FileInfo) error { + err = reg.blobStore.driver.Walk(ctx, root, func(fileInfo driver.FileInfo) error { return handleRepository(fileInfo, root, "", ingester) }) diff --git a/registry/storage/driver/base/base.go b/registry/storage/driver/base/base.go index 149cfcb2..87063e43 100644 --- a/registry/storage/driver/base/base.go +++ b/registry/storage/driver/base/base.go @@ -203,7 +203,7 @@ func (base *Base) Walk(ctx context.Context, path string, f storagedriver.WalkFn) ctx, done := dcontext.WithTrace(ctx) defer done("%s.Walk(%q)", base.Name(), path) - if !storagedriver.PathRegexp.MatchString(path) { + if !storagedriver.PathRegexp.MatchString(path) && path != "/" { return storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} } diff --git a/registry/storage/error.go b/registry/storage/error.go new file mode 100644 index 00000000..10913658 --- /dev/null +++ b/registry/storage/error.go @@ -0,0 +1,9 @@ +package storage + +import "fmt" + +// pushError formats an error type given a path and an error +// and pushes it to a slice of errors +func pushError(errors []error, path string, err error) []error { + return append(errors, fmt.Errorf("%s: %s", path, err)) +} diff --git a/registry/storage/purgeuploads.go b/registry/storage/purgeuploads.go index 4292bf7a..a82107a7 100644 --- a/registry/storage/purgeuploads.go +++ b/registry/storage/purgeuploads.go @@ -67,7 +67,7 @@ func getOutstandingUploads(ctx context.Context, driver storageDriver.StorageDriv return uploads, append(errors, err) } - err = Walk(ctx, driver, root, func(fileInfo storageDriver.FileInfo) error { + err = driver.Walk(ctx, root, func(fileInfo storageDriver.FileInfo) error { filePath := fileInfo.Path() _, file := path.Split(filePath) if file[0] == '_' { diff --git a/registry/storage/purgeuploads_test.go b/registry/storage/purgeuploads_test.go index 069f212d..23c553ba 100644 --- a/registry/storage/purgeuploads_test.go +++ b/registry/storage/purgeuploads_test.go @@ -142,7 +142,7 @@ func TestPurgeMissingStartedAt(t *testing.T) { oneHourAgo := time.Now().Add(-1 * time.Hour) fs, ctx := testUploadFS(t, 1, "test-repo", oneHourAgo) - err := 
Walk(ctx, fs, "/", func(fileInfo driver.FileInfo) error { + err := fs.Walk(ctx, "/", func(fileInfo driver.FileInfo) error { filePath := fileInfo.Path() _, file := path.Split(filePath) diff --git a/registry/storage/walk.go b/registry/storage/walk.go deleted file mode 100644 index add3ec46..00000000 --- a/registry/storage/walk.go +++ /dev/null @@ -1,51 +0,0 @@ -package storage - -import ( - "context" - "fmt" - "sort" - - storageDriver "github.com/docker/distribution/registry/storage/driver" -) - -// Walk traverses a filesystem defined within driver, starting -// from the given path, calling f on each file -// If the returned error from the WalkFn is ErrSkipDir and fileInfo refers -// to a directory, the directory will not be entered and Walk -// will continue the traversal. Otherwise Walk will return -// the error -func Walk(ctx context.Context, driver storageDriver.StorageDriver, from string, f storageDriver.WalkFn) error { - children, err := driver.List(ctx, from) - if err != nil { - return err - } - sort.Stable(sort.StringSlice(children)) - for _, child := range children { - // TODO(stevvooe): Calling driver.Stat for every entry is quite - // expensive when running against backends with a slow Stat - // implementation, such as s3. This is very likely a serious - // performance bottleneck. - fileInfo, err := driver.Stat(ctx, child) - if err != nil { - return err - } - err = f(fileInfo) - skipDir := (err == storageDriver.ErrSkipDir) - if err != nil && !skipDir { - return err - } - - if fileInfo.IsDir() && !skipDir { - if err := Walk(ctx, driver, child, f); err != nil { - return err - } - } - } - return nil -} - -// pushError formats an error type given a path and an error -// and pushes it to a slice of errors -func pushError(errors []error, path string, err error) []error { - return append(errors, fmt.Errorf("%s: %s", path, err)) -} diff --git a/registry/storage/walk_test.go b/registry/storage/walk_test.go deleted file mode 100644 index b0e7f4bc..00000000 --- a/registry/storage/walk_test.go +++ /dev/null @@ -1,152 +0,0 @@ -package storage - -import ( - "context" - "fmt" - "sort" - "testing" - - "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/inmemory" -) - -func testFS(t *testing.T) (driver.StorageDriver, map[string]string, context.Context) { - d := inmemory.New() - ctx := context.Background() - - expected := map[string]string{ - "/a": "dir", - "/a/b": "dir", - "/a/b/c": "dir", - "/a/b/c/d": "file", - "/a/b/c/e": "file", - "/a/b/f": "dir", - "/a/b/f/g": "file", - "/a/b/f/h": "file", - "/a/b/f/i": "file", - "/z": "dir", - "/z/y": "file", - } - - for p, typ := range expected { - if typ != "file" { - continue - } - - if err := d.PutContent(ctx, p, []byte(p)); err != nil { - t.Fatalf("unable to put content into fixture: %v", err) - } - } - - return d, expected, ctx -} - -func TestWalkErrors(t *testing.T) { - d, expected, ctx := testFS(t) - fileCount := len(expected) - err := Walk(ctx, d, "", func(fileInfo driver.FileInfo) error { - return nil - }) - if err == nil { - t.Error("Expected invalid root err") - } - - errEarlyExpected := fmt.Errorf("Early termination") - - err = Walk(ctx, d, "/", func(fileInfo driver.FileInfo) error { - // error on the 2nd file - if fileInfo.Path() == "/a/b" { - return errEarlyExpected - } - - delete(expected, fileInfo.Path()) - return nil - }) - if len(expected) != fileCount-1 { - t.Error("Walk failed to terminate with error") - } - if err != errEarlyExpected { - if err == nil { - t.Fatalf("expected 
an error due to early termination")
-		} else {
-			t.Error(err.Error())
-		}
-	}
-
-	err = Walk(ctx, d, "/nonexistent", func(fileInfo driver.FileInfo) error {
-		return nil
-	})
-	if err == nil {
-		t.Errorf("Expected missing file err")
-	}
-
-}
-
-func TestWalk(t *testing.T) {
-	d, expected, ctx := testFS(t)
-	var traversed []string
-	err := Walk(ctx, d, "/", func(fileInfo driver.FileInfo) error {
-		filePath := fileInfo.Path()
-		filetype, ok := expected[filePath]
-		if !ok {
-			t.Fatalf("Unexpected file in walk: %q", filePath)
-		}
-
-		if fileInfo.IsDir() {
-			if filetype != "dir" {
-				t.Errorf("Unexpected file type: %q", filePath)
-			}
-		} else {
-			if filetype != "file" {
-				t.Errorf("Unexpected file type: %q", filePath)
-			}
-
-			// each file has its own path as the contents. If the length
-			// doesn't match the path length, fail.
-			if fileInfo.Size() != int64(len(fileInfo.Path())) {
-				t.Fatalf("unexpected size for %q: %v != %v",
-					fileInfo.Path(), fileInfo.Size(), len(fileInfo.Path()))
-			}
-		}
-		delete(expected, filePath)
-		traversed = append(traversed, filePath)
-		return nil
-	})
-	if len(expected) > 0 {
-		t.Errorf("Missed files in walk: %q", expected)
-	}
-
-	if !sort.StringsAreSorted(traversed) {
-		t.Errorf("result should be sorted: %v", traversed)
-	}
-
-	if err != nil {
-		t.Fatalf(err.Error())
-	}
-}
-
-func TestWalkSkipDir(t *testing.T) {
-	d, expected, ctx := testFS(t)
-	err := Walk(ctx, d, "/", func(fileInfo driver.FileInfo) error {
-		filePath := fileInfo.Path()
-		if filePath == "/a/b" {
-			// skip processing /a/b/c and /a/b/c/d
-			return driver.ErrSkipDir
-		}
-		delete(expected, filePath)
-		return nil
-	})
-	if err != nil {
-		t.Fatalf(err.Error())
-	}
-	if _, ok := expected["/a/b/c"]; !ok {
-		t.Errorf("/a/b/c not skipped")
-	}
-	if _, ok := expected["/a/b/c/d"]; !ok {
-		t.Errorf("/a/b/c/d not skipped")
-	}
-	if _, ok := expected["/a/b/c/e"]; !ok {
-		t.Errorf("/a/b/c/e not skipped")
-	}
-
-}

From a4c32bce50a602b99b55236ab0d7cc9ef8af92b0 Mon Sep 17 00:00:00 2001
From: Misty Stanley-Jones
Date: Tue, 23 Jan 2018 09:02:07 -0800
Subject: [PATCH 083/276] Fixed broken storage driver link

Signed-off-by: Misty Stanley-Jones
---
 docs/spec/index.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/spec/index.md b/docs/spec/index.md
index f397628d..46fddb12 100644
--- a/docs/spec/index.md
+++ b/docs/spec/index.md
@@ -7,6 +7,6 @@ keywords: registry, service, images, repository, json
 # Docker Registry Reference
 
 * [HTTP API V2](api.md)
-* [Storage Driver](../storage-drivers/index.md)
+* [Storage Driver](https://docs.docker.com/registry/storage-drivers/)
 * [Token Authentication Specification](auth/token.md)
 * [Token Authentication Implementation](auth/jwt.md)

From 4ecb17cc4cdce29c5aaecbb9b1aa6fade3e213ad Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Felix=20Bu=CC=88nemann?=
Date: Thu, 1 Feb 2018 21:16:58 +0100
Subject: [PATCH 084/276] registry: support whitelisting letsencrypt hosts

This adds a configuration setting `HTTP.TLS.LetsEncrypt.Hosts` which can be
set to a list of hosts that the registry will whitelist for retrieving
certificates from Let's Encrypt.

HTTPS connections with SNI hostnames that are not whitelisted will be closed
with an "unknown host" error.

This is required to avoid large numbers of unsuccessful registration attempts
triggered by malicious clients connecting with bogus SNI hostnames.

NOTE: Due to a bug in the deprecated vendored rsc.io/letsencrypt library,
clearing the host list requires deleting or editing the cachefile to reset
the hosts list to null.
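
For comparison, the same host-whitelisting idea can be expressed with the
maintained golang.org/x/crypto/acme/autocert package. This is only a sketch,
not the vendored rsc.io/letsencrypt library this patch configures; the cache
path is a placeholder and the hostname is taken from the docs example.

    package main

    import (
        "crypto/tls"
        "net/http"

        "golang.org/x/crypto/acme/autocert"
    )

    func main() {
        m := &autocert.Manager{
            Prompt: autocert.AcceptTOS,
            Cache:  autocert.DirCache("/path/to/cache-dir"), // placeholder path
            // Only whitelisted SNI hostnames may trigger certificate
            // issuance; handshakes for any other name are refused.
            HostPolicy: autocert.HostWhitelist("myregistryaddress.org"),
        }

        srv := &http.Server{
            Addr:      ":443",
            TLSConfig: &tls.Config{GetCertificate: m.GetCertificate},
        }
        // Certificates are requested lazily on the first TLS handshake
        // for each whitelisted host.
        if err := srv.ListenAndServeTLS("", ""); err != nil {
            panic(err)
        }
    }
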
Signed-off-by: Felix Buenemann --- configuration/configuration.go | 4 ++++ configuration/configuration_test.go | 10 ++++++---- docs/configuration.md | 7 ++++++- registry/registry.go | 3 +++ 4 files changed, 19 insertions(+), 5 deletions(-) diff --git a/configuration/configuration.go b/configuration/configuration.go index cdc996b9..4578b7bf 100644 --- a/configuration/configuration.go +++ b/configuration/configuration.go @@ -114,6 +114,10 @@ type Configuration struct { // Email is the email to use during Let's Encrypt registration Email string `yaml:"email,omitempty"` + + // Hosts specifies the hosts which are allowed to obtain Let's + // Encrypt certificates. + Hosts []string `yaml:"hosts,omitempty"` } `yaml:"letsencrypt,omitempty"` } `yaml:"tls,omitempty"` diff --git a/configuration/configuration_test.go b/configuration/configuration_test.go index 3e1583dd..010ead87 100644 --- a/configuration/configuration_test.go +++ b/configuration/configuration_test.go @@ -78,8 +78,9 @@ var configStruct = Configuration{ Key string `yaml:"key,omitempty"` ClientCAs []string `yaml:"clientcas,omitempty"` LetsEncrypt struct { - CacheFile string `yaml:"cachefile,omitempty"` - Email string `yaml:"email,omitempty"` + CacheFile string `yaml:"cachefile,omitempty"` + Email string `yaml:"email,omitempty"` + Hosts []string `yaml:"hosts,omitempty"` } `yaml:"letsencrypt,omitempty"` } `yaml:"tls,omitempty"` Headers http.Header `yaml:"headers,omitempty"` @@ -95,8 +96,9 @@ var configStruct = Configuration{ Key string `yaml:"key,omitempty"` ClientCAs []string `yaml:"clientcas,omitempty"` LetsEncrypt struct { - CacheFile string `yaml:"cachefile,omitempty"` - Email string `yaml:"email,omitempty"` + CacheFile string `yaml:"cachefile,omitempty"` + Email string `yaml:"email,omitempty"` + Hosts []string `yaml:"hosts,omitempty"` } `yaml:"letsencrypt,omitempty"` }{ ClientCAs: []string{"/path/to/ca.pem"}, diff --git a/docs/configuration.md b/docs/configuration.md index 807353cc..dd14a689 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -215,6 +215,7 @@ http: letsencrypt: cachefile: /path/to/cache-file email: emailused@letsencrypt.com + hosts: [myregistryaddress.org] debug: addr: localhost:5001 headers: @@ -738,6 +739,7 @@ http: letsencrypt: cachefile: /path/to/cache-file email: emailused@letsencrypt.com + hosts: [myregistryaddress.org] debug: addr: localhost:5001 headers: @@ -782,12 +784,15 @@ TLS certificates provided by > accessible on port `443`. The registry defaults to listening on port `5000`. > If you run the registry as a container, consider adding the flag `-p 443:5000` > to the `docker run` command or using a similar setting in a cloud -> configuration. +> configuration. You should also set the `hosts` option to the list of hostnames +> that are valid for this registry to avoid trying to get certificates for random +> hostnames due to malicious clients connecting with bogus SNI hostnames. | Parameter | Required | Description | |-----------|----------|-------------------------------------------------------| | `cachefile` | yes | Absolute path to a file where the Let's Encrypt agent can cache data. | | `email` | yes | The email address used to register with Let's Encrypt. | +| `hosts` | no | The hostnames allowed for Let's Encrypt certificates. 
|

### `debug`
diff --git a/registry/registry.go b/registry/registry.go
index 2e887784..93296255 100644
--- a/registry/registry.go
+++ b/registry/registry.go
@@ -147,6 +147,9 @@ func (registry *Registry) ListenAndServe() error {
 				return err
 			}
 		}
+		if len(config.HTTP.TLS.LetsEncrypt.Hosts) > 0 {
+			m.SetHosts(config.HTTP.TLS.LetsEncrypt.Hosts)
+		}
 		tlsConf.GetCertificate = m.GetCertificate
 	} else {
 		tlsConf.Certificates = make([]tls.Certificate, 1)

From 1ba5b3b5538912ba239a376c0c84ea54a062103a Mon Sep 17 00:00:00 2001
From: Stephen J Day
Date: Tue, 6 Feb 2018 15:25:51 -0800
Subject: [PATCH 085/276] registry/storage: ignore missing tag on delete

Signed-off-by: Stephen J Day
---
 registry/storage/tagstore.go | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)

diff --git a/registry/storage/tagstore.go b/registry/storage/tagstore.go
index 3fbed6d3..6a5848c7 100644
--- a/registry/storage/tagstore.go
+++ b/registry/storage/tagstore.go
@@ -122,17 +122,20 @@ func (ts *tagStore) Untag(ctx context.Context, tag string) error {
 		name: ts.repository.Named().Name(),
 		tag:  tag,
 	})
-
-	switch err.(type) {
-	case storagedriver.PathNotFoundError:
-		return distribution.ErrTagUnknown{Tag: tag}
-	case nil:
-		break
-	default:
+	if err != nil {
 		return err
 	}
 
-	return ts.blobStore.driver.Delete(ctx, tagPath)
+	if err := ts.blobStore.driver.Delete(ctx, tagPath); err != nil {
+		switch err.(type) {
+		case storagedriver.PathNotFoundError:
+			return nil // Untag is idempotent, we don't care if it didn't exist
+		default:
+			return err
+		}
+	}
+
+	return nil
 }
 
 // linkedBlobStore returns the linkedBlobStore for the named tag, allowing one

From 005c6e0236c98544d35197a17947c02e23bc9223 Mon Sep 17 00:00:00 2001
From: Wenkai Yin
Date: Thu, 8 Feb 2018 17:28:22 +0800
Subject: [PATCH 086/276] ignore path not found error when looking up tags

Signed-off-by: Wenkai Yin
---
 registry/storage/tagstore.go      | 4 ++++
 registry/storage/tagstore_test.go | 4 ++--
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/registry/storage/tagstore.go b/registry/storage/tagstore.go
index 6a5848c7..ab5f54a6 100644
--- a/registry/storage/tagstore.go
+++ b/registry/storage/tagstore.go
@@ -182,6 +182,10 @@ func (ts *tagStore) Lookup(ctx context.Context, desc distribution.Descriptor) ([
 		tagLinkPath, err := pathFor(tagLinkPathSpec)
 		tagDigest, err := ts.blobStore.readlink(ctx, tagLinkPath)
 		if err != nil {
+			switch err.(type) {
+			case storagedriver.PathNotFoundError:
+				continue
+			}
 			return nil, err
 		}
 
diff --git a/registry/storage/tagstore_test.go b/registry/storage/tagstore_test.go
index 5bfa1451..314fa433 100644
--- a/registry/storage/tagstore_test.go
+++ b/registry/storage/tagstore_test.go
@@ -84,8 +84,8 @@ func TestTagStoreUnTag(t *testing.T) {
 	desc := distribution.Descriptor{Digest: "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"}
 
 	err := tags.Untag(ctx, "latest")
-	if err == nil {
-		t.Errorf("Expected error untagging non-existant tag")
+	if err != nil {
+		t.Error(err)
 	}
 
 	err = tags.Tag(ctx, "latest", desc)

From e3c37a46e2529305ad6f5648abd6ab68c777820a Mon Sep 17 00:00:00 2001
From: tifayuki
Date: Thu, 16 Nov 2017 16:43:38 -0800
Subject: [PATCH 087/276] Add Prometheus Metrics

At the first iteration, only the following metrics are collected:

  - HTTP metrics of each API endpoint
  - cache counter for request/hit/miss
  - histogram of storage actions, including: GetContent, PutContent,
    Stat, List, Move, and Delete

Signed-off-by: tifayuki
---
 cmd/registry/config-dev.yml | 5 +-
 configuration/configuration.go | 5 +
configuration/configuration_test.go | 6 +- docs/configuration.md | 16 + metrics/prometheus.go | 13 + registry/handlers/app.go | 13 +- registry/registry.go | 10 + .../cache/cachedblobdescriptorstore.go | 10 +- registry/storage/driver/base/base.go | 35 +- vendor.conf | 9 +- vendor/github.com/beorn7/perks/LICENSE | 20 + vendor/github.com/beorn7/perks/README.md | 31 + .../beorn7/perks/quantile/stream.go | 292 +++++++ .../github.com/docker/go-metrics/LICENSE.code | 191 +++++ .../github.com/docker/go-metrics/LICENSE.docs | 425 +++++++++ vendor/github.com/docker/go-metrics/NOTICE | 16 + vendor/github.com/docker/go-metrics/README.md | 91 ++ .../github.com/docker/go-metrics/counter.go | 52 ++ vendor/github.com/docker/go-metrics/docs.go | 3 + vendor/github.com/docker/go-metrics/gauge.go | 72 ++ .../github.com/docker/go-metrics/handler.go | 74 ++ .../github.com/docker/go-metrics/helpers.go | 10 + .../github.com/docker/go-metrics/namespace.go | 315 +++++++ .../github.com/docker/go-metrics/register.go | 15 + vendor/github.com/docker/go-metrics/timer.go | 85 ++ vendor/github.com/docker/go-metrics/unit.go | 12 + .../golang_protobuf_extensions/LICENSE | 201 +++++ .../golang_protobuf_extensions/NOTICE | 1 + .../golang_protobuf_extensions/README.md | 20 + .../pbutil/decode.go | 75 ++ .../golang_protobuf_extensions/pbutil/doc.go | 16 + .../pbutil/encode.go | 46 + vendor/github.com/miekg/dns/msg_generate.go | 340 -------- vendor/github.com/miekg/dns/types_generate.go | 271 ------ .../prometheus/client_golang/LICENSE | 201 +++++ .../prometheus/client_golang/NOTICE | 23 + .../prometheus/client_golang/README.md | 93 ++ .../client_golang/prometheus/README.md | 1 + .../client_golang/prometheus/collector.go | 75 ++ .../client_golang/prometheus/counter.go | 277 ++++++ .../client_golang/prometheus/desc.go | 188 ++++ .../client_golang/prometheus/doc.go | 186 ++++ .../prometheus/expvar_collector.go | 119 +++ .../client_golang/prometheus/fnv.go | 29 + .../client_golang/prometheus/gauge.go | 286 +++++++ .../client_golang/prometheus/go_collector.go | 284 ++++++ .../client_golang/prometheus/histogram.go | 505 +++++++++++ .../client_golang/prometheus/http.go | 523 ++++++++++++ .../client_golang/prometheus/labels.go | 57 ++ .../client_golang/prometheus/metric.go | 158 ++++ .../client_golang/prometheus/observer.go | 52 ++ .../prometheus/process_collector.go | 140 +++ .../prometheus/promhttp/delegator.go | 199 +++++ .../prometheus/promhttp/delegator_1_8.go | 181 ++++ .../prometheus/promhttp/delegator_pre_1_8.go | 44 + .../client_golang/prometheus/promhttp/http.go | 267 ++++++ .../prometheus/promhttp/instrument_client.go | 97 +++ .../promhttp/instrument_client_1_8.go | 144 ++++ .../prometheus/promhttp/instrument_server.go | 447 ++++++++++ .../client_golang/prometheus/registry.go | 807 ++++++++++++++++++ .../client_golang/prometheus/summary.go | 609 +++++++++++++ .../client_golang/prometheus/timer.go | 51 ++ .../client_golang/prometheus/untyped.go | 42 + .../client_golang/prometheus/value.go | 160 ++++ .../client_golang/prometheus/vec.go | 469 ++++++++++ .../prometheus/client_model/LICENSE | 201 +++++ .../github.com/prometheus/client_model/NOTICE | 5 + .../prometheus/client_model/README.md | 26 + .../prometheus/client_model/go/metrics.pb.go | 364 ++++++++ vendor/github.com/prometheus/common/LICENSE | 201 +++++ vendor/github.com/prometheus/common/NOTICE | 5 + vendor/github.com/prometheus/common/README.md | 12 + .../prometheus/common/expfmt/decode.go | 429 ++++++++++ .../prometheus/common/expfmt/encode.go | 88 ++ 
.../prometheus/common/expfmt/expfmt.go | 38 + .../prometheus/common/expfmt/fuzz.go | 36 + .../prometheus/common/expfmt/text_create.go | 303 +++++++ .../prometheus/common/expfmt/text_parse.go | 757 ++++++++++++++++ .../bitbucket.org/ww/goautoneg/README.txt | 67 ++ .../bitbucket.org/ww/goautoneg/autoneg.go | 162 ++++ .../prometheus/common/model/alert.go | 136 +++ .../prometheus/common/model/fingerprinting.go | 105 +++ .../github.com/prometheus/common/model/fnv.go | 42 + .../prometheus/common/model/labels.go | 210 +++++ .../prometheus/common/model/labelset.go | 169 ++++ .../prometheus/common/model/metric.go | 103 +++ .../prometheus/common/model/model.go | 16 + .../prometheus/common/model/signature.go | 144 ++++ .../prometheus/common/model/silence.go | 106 +++ .../prometheus/common/model/time.go | 264 ++++++ .../prometheus/common/model/value.go | 416 +++++++++ vendor/github.com/prometheus/procfs/LICENSE | 201 +++++ vendor/github.com/prometheus/procfs/NOTICE | 7 + vendor/github.com/prometheus/procfs/README.md | 11 + .../github.com/prometheus/procfs/buddyinfo.go | 95 +++ vendor/github.com/prometheus/procfs/doc.go | 45 + vendor/github.com/prometheus/procfs/fs.go | 69 ++ .../prometheus/procfs/internal/util/parse.go | 46 + vendor/github.com/prometheus/procfs/ipvs.go | 246 ++++++ vendor/github.com/prometheus/procfs/mdstat.go | 138 +++ .../prometheus/procfs/mountstats.go | 556 ++++++++++++ .../github.com/prometheus/procfs/net_dev.go | 203 +++++ .../github.com/prometheus/procfs/nfs/nfs.go | 263 ++++++ .../github.com/prometheus/procfs/nfs/parse.go | 308 +++++++ .../prometheus/procfs/nfs/parse_nfs.go | 67 ++ .../prometheus/procfs/nfs/parse_nfsd.go | 89 ++ vendor/github.com/prometheus/procfs/proc.go | 224 +++++ .../github.com/prometheus/procfs/proc_io.go | 52 ++ .../prometheus/procfs/proc_limits.go | 137 +++ .../github.com/prometheus/procfs/proc_ns.go | 55 ++ .../github.com/prometheus/procfs/proc_stat.go | 175 ++++ vendor/github.com/prometheus/procfs/stat.go | 219 +++++ vendor/github.com/prometheus/procfs/xfrm.go | 187 ++++ .../github.com/prometheus/procfs/xfs/parse.go | 330 +++++++ .../github.com/prometheus/procfs/xfs/xfs.go | 163 ++++ vendor/golang.org/x/net/idna/idna.go | 68 -- vendor/golang.org/x/net/idna/punycode.go | 200 ----- vendor/golang.org/x/net/publicsuffix/gen.go | 663 -------------- 118 files changed, 17182 insertions(+), 1550 deletions(-) create mode 100644 metrics/prometheus.go create mode 100644 vendor/github.com/beorn7/perks/LICENSE create mode 100644 vendor/github.com/beorn7/perks/README.md create mode 100644 vendor/github.com/beorn7/perks/quantile/stream.go create mode 100644 vendor/github.com/docker/go-metrics/LICENSE.code create mode 100644 vendor/github.com/docker/go-metrics/LICENSE.docs create mode 100644 vendor/github.com/docker/go-metrics/NOTICE create mode 100644 vendor/github.com/docker/go-metrics/README.md create mode 100644 vendor/github.com/docker/go-metrics/counter.go create mode 100644 vendor/github.com/docker/go-metrics/docs.go create mode 100644 vendor/github.com/docker/go-metrics/gauge.go create mode 100644 vendor/github.com/docker/go-metrics/handler.go create mode 100644 vendor/github.com/docker/go-metrics/helpers.go create mode 100644 vendor/github.com/docker/go-metrics/namespace.go create mode 100644 vendor/github.com/docker/go-metrics/register.go create mode 100644 vendor/github.com/docker/go-metrics/timer.go create mode 100644 vendor/github.com/docker/go-metrics/unit.go create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE create mode 
100644 vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/README.md create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go delete mode 100644 vendor/github.com/miekg/dns/msg_generate.go delete mode 100644 vendor/github.com/miekg/dns/types_generate.go create mode 100644 vendor/github.com/prometheus/client_golang/LICENSE create mode 100644 vendor/github.com/prometheus/client_golang/NOTICE create mode 100644 vendor/github.com/prometheus/client_golang/README.md create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/README.md create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/counter.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/desc.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/doc.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/fnv.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/gauge.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/go_collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/histogram.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/http.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/labels.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/metric.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/observer.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/registry.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/summary.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/timer.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/untyped.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/value.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/vec.go create mode 100644 vendor/github.com/prometheus/client_model/LICENSE create mode 100644 vendor/github.com/prometheus/client_model/NOTICE create mode 100644 vendor/github.com/prometheus/client_model/README.md create mode 100644 vendor/github.com/prometheus/client_model/go/metrics.pb.go create mode 100644 
vendor/github.com/prometheus/common/LICENSE create mode 100644 vendor/github.com/prometheus/common/NOTICE create mode 100644 vendor/github.com/prometheus/common/README.md create mode 100644 vendor/github.com/prometheus/common/expfmt/decode.go create mode 100644 vendor/github.com/prometheus/common/expfmt/encode.go create mode 100644 vendor/github.com/prometheus/common/expfmt/expfmt.go create mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz.go create mode 100644 vendor/github.com/prometheus/common/expfmt/text_create.go create mode 100644 vendor/github.com/prometheus/common/expfmt/text_parse.go create mode 100644 vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt create mode 100644 vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go create mode 100644 vendor/github.com/prometheus/common/model/alert.go create mode 100644 vendor/github.com/prometheus/common/model/fingerprinting.go create mode 100644 vendor/github.com/prometheus/common/model/fnv.go create mode 100644 vendor/github.com/prometheus/common/model/labels.go create mode 100644 vendor/github.com/prometheus/common/model/labelset.go create mode 100644 vendor/github.com/prometheus/common/model/metric.go create mode 100644 vendor/github.com/prometheus/common/model/model.go create mode 100644 vendor/github.com/prometheus/common/model/signature.go create mode 100644 vendor/github.com/prometheus/common/model/silence.go create mode 100644 vendor/github.com/prometheus/common/model/time.go create mode 100644 vendor/github.com/prometheus/common/model/value.go create mode 100644 vendor/github.com/prometheus/procfs/LICENSE create mode 100644 vendor/github.com/prometheus/procfs/NOTICE create mode 100644 vendor/github.com/prometheus/procfs/README.md create mode 100644 vendor/github.com/prometheus/procfs/buddyinfo.go create mode 100644 vendor/github.com/prometheus/procfs/doc.go create mode 100644 vendor/github.com/prometheus/procfs/fs.go create mode 100644 vendor/github.com/prometheus/procfs/internal/util/parse.go create mode 100644 vendor/github.com/prometheus/procfs/ipvs.go create mode 100644 vendor/github.com/prometheus/procfs/mdstat.go create mode 100644 vendor/github.com/prometheus/procfs/mountstats.go create mode 100644 vendor/github.com/prometheus/procfs/net_dev.go create mode 100644 vendor/github.com/prometheus/procfs/nfs/nfs.go create mode 100644 vendor/github.com/prometheus/procfs/nfs/parse.go create mode 100644 vendor/github.com/prometheus/procfs/nfs/parse_nfs.go create mode 100644 vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go create mode 100644 vendor/github.com/prometheus/procfs/proc.go create mode 100644 vendor/github.com/prometheus/procfs/proc_io.go create mode 100644 vendor/github.com/prometheus/procfs/proc_limits.go create mode 100644 vendor/github.com/prometheus/procfs/proc_ns.go create mode 100644 vendor/github.com/prometheus/procfs/proc_stat.go create mode 100644 vendor/github.com/prometheus/procfs/stat.go create mode 100644 vendor/github.com/prometheus/procfs/xfrm.go create mode 100644 vendor/github.com/prometheus/procfs/xfs/parse.go create mode 100644 vendor/github.com/prometheus/procfs/xfs/xfs.go delete mode 100644 vendor/golang.org/x/net/idna/idna.go delete mode 100644 vendor/golang.org/x/net/idna/punycode.go delete mode 100644 vendor/golang.org/x/net/publicsuffix/gen.go diff --git a/cmd/registry/config-dev.yml b/cmd/registry/config-dev.yml index b6438be5..e6b09064 100644 --- a/cmd/registry/config-dev.yml +++ b/cmd/registry/config-dev.yml @@ -31,7 
+31,10 @@ storage: http: addr: :5000 debug: - addr: localhost:5001 + addr: :5001 + prometheus: + enabled: true + path: /metrics headers: X-Content-Type-Options: [nosniff] redis: diff --git a/configuration/configuration.go b/configuration/configuration.go index cdc996b9..86c66b7c 100644 --- a/configuration/configuration.go +++ b/configuration/configuration.go @@ -129,6 +129,11 @@ type Configuration struct { Debug struct { // Addr specifies the bind address for the debug server. Addr string `yaml:"addr,omitempty"` + // Prometheus configures the Prometheus telemetry endpoint. + Prometheus struct { + Enabled bool `yaml:"enabled,omitempty"` + Path string `yaml:"path,omitempty"` + } `yaml:"prometheus,omitempty"` } `yaml:"debug,omitempty"` // HTTP2 configuration options diff --git a/configuration/configuration_test.go b/configuration/configuration_test.go index 3e1583dd..277748a5 100644 --- a/configuration/configuration_test.go +++ b/configuration/configuration_test.go @@ -84,7 +84,11 @@ var configStruct = Configuration{ } `yaml:"tls,omitempty"` Headers http.Header `yaml:"headers,omitempty"` Debug struct { - Addr string `yaml:"addr,omitempty"` + Addr string `yaml:"addr,omitempty"` + Prometheus struct { + Enabled bool `yaml:"enabled,omitempty"` + Path string `yaml:"path,omitempty"` + } `yaml:"prometheus,omitempty"` } `yaml:"debug,omitempty"` HTTP2 struct { Disabled bool `yaml:"disabled,omitempty"` diff --git a/docs/configuration.md b/docs/configuration.md index c7f9023f..464a3cca 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -213,6 +213,9 @@ http: email: emailused@letsencrypt.com debug: addr: localhost:5001 + prometheus: + enabled: true + path: /metrics headers: X-Content-Type-Options: [nosniff] http2: @@ -784,6 +787,19 @@ access to the debug endpoint is locked down in a production environment. The `debug` section takes a single required `addr` parameter, which specifies the `HOST:PORT` on which the debug server should accept connections. +### `prometheus` + +The `prometheus` option defines whether Prometheus metrics are enabled, as well +as the path to access the metrics. + +| Parameter | Required | Description | |-----------|----------|-------------------------------------------------------| | `enabled` | no | Set `true` to enable the Prometheus server | | `path` | no | The path to access the metrics, `/metrics` by default | + +The URL to access the metrics is `HOST:PORT/path`, where `HOST:PORT` is defined +in `addr` under `debug`. + ### `headers` The `headers` option is **optional**.
Use it to specify headers that the HTTP diff --git a/metrics/prometheus.go b/metrics/prometheus.go new file mode 100644 index 00000000..b5a53214 --- /dev/null +++ b/metrics/prometheus.go @@ -0,0 +1,13 @@ +package metrics + +import "github.com/docker/go-metrics" + +const ( + // NamespacePrefix is the namespace of prometheus metrics + NamespacePrefix = "registry" +) + +var ( + // StorageNamespace is the prometheus namespace of blob/cache related operations + StorageNamespace = metrics.NewNamespace(NamespacePrefix, "storage", nil) +) diff --git a/registry/handlers/app.go b/registry/handlers/app.go index e38050cb..67ca31fb 100644 --- a/registry/handlers/app.go +++ b/registry/handlers/app.go @@ -20,6 +20,7 @@ import ( dcontext "github.com/docker/distribution/context" "github.com/docker/distribution/health" "github.com/docker/distribution/health/checks" + prometheus "github.com/docker/distribution/metrics" "github.com/docker/distribution/notifications" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/errcode" @@ -35,6 +36,7 @@ import ( "github.com/docker/distribution/registry/storage/driver/factory" storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" "github.com/docker/distribution/version" + "github.com/docker/go-metrics" "github.com/docker/libtrust" "github.com/garyburd/redigo/redis" "github.com/gorilla/mux" @@ -412,6 +414,15 @@ func (app *App) RegisterHealthChecks(healthRegistries ...*health.Registry) { // passed through the application filters and context will be constructed at // request time. func (app *App) register(routeName string, dispatch dispatchFunc) { + handler := app.dispatcher(dispatch) + + // Chain the handler with prometheus instrumented handler + if app.Config.HTTP.Debug.Prometheus.Enabled { + namespace := metrics.NewNamespace(prometheus.NamespacePrefix, "http", nil) + httpMetrics := namespace.NewDefaultHttpMetrics(strings.Replace(routeName, "-", "_", -1)) + metrics.Register(namespace) + handler = metrics.InstrumentHandler(httpMetrics, handler) + } // TODO(stevvooe): This odd dispatcher/route registration is by-product of // some limitations in the gorilla/mux router. We are using it to keep @@ -419,7 +430,7 @@ func (app *App) register(routeName string, dispatch dispatchFunc) { // replace it with manual routing and structure-based dispatch for better // control over the request execution. - app.router.GetRoute(routeName).Handler(app.dispatcher(dispatch)) + app.router.GetRoute(routeName).Handler(handler) } // configureEvents prepares the event sink for action. 
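The wiring added to `registry/handlers/app.go` above composes a handful of `docker/go-metrics` calls: create a namespace, derive default HTTP metrics for a named route, wrap the route's handler with `InstrumentHandler`, and (in `registry/registry.go`, below) expose the collected metrics via `metrics.Handler()`. The following is a minimal standalone sketch of that same pattern, not code from this patch; the route name `"hello"` and the stub handler are hypothetical stand-ins:

```
package main

import (
	"net/http"

	metrics "github.com/docker/go-metrics"
)

func main() {
	// Metrics are exported under the "registry_http_*" prefix, mirroring
	// the NamespacePrefix constant introduced in metrics/prometheus.go.
	ns := metrics.NewNamespace("registry", "http", nil)

	// Default HTTP metrics (request counts, durations, sizes) labeled by
	// handler name; "hello" is a hypothetical route name.
	httpMetrics := ns.NewDefaultHttpMetrics("hello")
	metrics.Register(ns)

	var handler http.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	})

	// Every request passing through the wrapped handler is now observed.
	handler = metrics.InstrumentHandler(httpMetrics, handler)

	http.Handle("/hello", handler)
	// Serve the collected metrics, as ServeCmd does when
	// http.debug.prometheus.enabled is set.
	http.Handle("/metrics", metrics.Handler())
	http.ListenAndServe(":5001", nil)
}
```

Because `metrics.Handler()` serves the shared default Prometheus registry, the per-route HTTP metrics here and the storage timers registered later in this patch all surface on the same endpoint.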
diff --git a/registry/registry.go b/registry/registry.go index 2e887784..d9244f2f 100644 --- a/registry/registry.go +++ b/registry/registry.go @@ -21,6 +21,7 @@ import ( "github.com/docker/distribution/registry/listener" "github.com/docker/distribution/uuid" "github.com/docker/distribution/version" + "github.com/docker/go-metrics" gorhandlers "github.com/gorilla/handlers" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" @@ -58,6 +59,15 @@ var ServeCmd = &cobra.Command{ log.Fatalln(err) } + if config.HTTP.Debug.Prometheus.Enabled { + path := config.HTTP.Debug.Prometheus.Path + if path == "" { + path = "/metrics" + } + log.Info("providing prometheus metrics on ", path) + http.Handle(path, metrics.Handler()) + } + if err = registry.ListenAndServe(); err != nil { log.Fatalln(err) } diff --git a/registry/storage/cache/cachedblobdescriptorstore.go b/registry/storage/cache/cachedblobdescriptorstore.go index cdc34f5f..ac4c4521 100644 --- a/registry/storage/cache/cachedblobdescriptorstore.go +++ b/registry/storage/cache/cachedblobdescriptorstore.go @@ -4,6 +4,7 @@ import ( "context" "github.com/docker/distribution" + prometheus "github.com/docker/distribution/metrics" "github.com/opencontainers/go-digest" ) @@ -38,6 +39,11 @@ type cachedBlobStatter struct { tracker MetricsTracker } +var ( + // cacheCount is the total number of cache requests received/hits/misses + cacheCount = prometheus.StorageNamespace.NewLabeledCounter("cache", "The number of cache request received", "type") +) + // NewCachedBlobStatter creates a new statter which prefers a cache and // falls back to a backend. func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService) distribution.BlobDescriptorService { @@ -58,6 +64,7 @@ func NewCachedBlobStatterWithMetrics(cache distribution.BlobDescriptorService, b } func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + cacheCount.WithValues("Request").Inc(1) desc, err := cbds.cache.Stat(ctx, dgst) if err != nil { if err != distribution.ErrBlobUnknown { @@ -66,12 +73,13 @@ func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (di goto fallback } - + cacheCount.WithValues("Hit").Inc(1) if cbds.tracker != nil { cbds.tracker.Hit() } return desc, nil fallback: + cacheCount.WithValues("Miss").Inc(1) if cbds.tracker != nil { cbds.tracker.Miss() } diff --git a/registry/storage/driver/base/base.go b/registry/storage/driver/base/base.go index 1778c6c5..b9cf6765 100644 --- a/registry/storage/driver/base/base.go +++ b/registry/storage/driver/base/base.go @@ -42,9 +42,21 @@ import ( "io" dcontext "github.com/docker/distribution/context" + prometheus "github.com/docker/distribution/metrics" storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/go-metrics" + "time" ) +var ( + // storageAction times blob-related storage driver operations + storageAction = prometheus.StorageNamespace.NewLabeledTimer("action", "The number of seconds that the storage action takes", "driver", "action") +) + +func init() { + metrics.Register(prometheus.StorageNamespace) +} + // Base provides a wrapper around a storagedriver implementation that provides // common path and bounds checking.
type Base struct { @@ -87,7 +99,9 @@ func (base *Base) GetContent(ctx context.Context, path string) ([]byte, error) { return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} } + start := time.Now() b, e := base.StorageDriver.GetContent(ctx, path) + storageAction.WithValues(base.Name(), "GetContent").UpdateSince(start) return b, base.setDriverName(e) } @@ -100,7 +114,10 @@ func (base *Base) PutContent(ctx context.Context, path string, content []byte) e return storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} } - return base.setDriverName(base.StorageDriver.PutContent(ctx, path, content)) + start := time.Now() + err := base.setDriverName(base.StorageDriver.PutContent(ctx, path, content)) + storageAction.WithValues(base.Name(), "PutContent").UpdateSince(start) + return err } // Reader wraps Reader of underlying storage driver. @@ -142,7 +159,9 @@ func (base *Base) Stat(ctx context.Context, path string) (storagedriver.FileInfo return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} } + start := time.Now() fi, e := base.StorageDriver.Stat(ctx, path) + storageAction.WithValues(base.Name(), "Stat").UpdateSince(start) return fi, base.setDriverName(e) } @@ -155,7 +174,9 @@ func (base *Base) List(ctx context.Context, path string) ([]string, error) { return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} } + start := time.Now() str, e := base.StorageDriver.List(ctx, path) + storageAction.WithValues(base.Name(), "List").UpdateSince(start) return str, base.setDriverName(e) } @@ -170,7 +191,10 @@ func (base *Base) Move(ctx context.Context, sourcePath string, destPath string) return storagedriver.InvalidPathError{Path: destPath, DriverName: base.StorageDriver.Name()} } - return base.setDriverName(base.StorageDriver.Move(ctx, sourcePath, destPath)) + start := time.Now() + err := base.setDriverName(base.StorageDriver.Move(ctx, sourcePath, destPath)) + storageAction.WithValues(base.Name(), "Move").UpdateSince(start) + return err } // Delete wraps Delete of underlying storage driver. @@ -182,7 +206,10 @@ func (base *Base) Delete(ctx context.Context, path string) error { return storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} } - return base.setDriverName(base.StorageDriver.Delete(ctx, path)) + start := time.Now() + err := base.setDriverName(base.StorageDriver.Delete(ctx, path)) + storageAction.WithValues(base.Name(), "Delete").UpdateSince(start) + return err } // URLFor wraps URLFor of underlying storage driver. 
@@ -194,6 +221,8 @@ func (base *Base) URLFor(ctx context.Context, path string, options map[string]in return "", storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} } + start := time.Now() str, e := base.StorageDriver.URLFor(ctx, path, options) + storageAction.WithValues(base.Name(), "URLFor").UpdateSince(start) return str, base.setDriverName(e) } diff --git a/vendor.conf b/vendor.conf index d67edd77..8b0d98d6 100644 --- a/vendor.conf +++ b/vendor.conf @@ -3,11 +3,13 @@ github.com/Azure/go-autorest ec5f4903f77ed9927ac95b19ab8e44ada64c1356 github.com/sirupsen/logrus 3d4380f53a34dcdc95f0c1db702615992b38d9a4 github.com/aws/aws-sdk-go c6fc52983ea2375810aa38ddb5370e9cdf611716 github.com/bshuster-repo/logrus-logstash-hook d2c0ecc1836d91814e15e23bb5dc309c3ef51f4a +github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 github.com/bugsnag/bugsnag-go b1d153021fcd90ca3f080db36bec96dc690fb274 github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702 github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782 github.com/denverdino/aliyungo afedced274aa9a7fcdd47ac97018f0f8db4e5de2 github.com/dgrijalva/jwt-go a601269ab70c205d26370c16f7c81e9017c14e04 +github.com/docker/go-metrics 399ea8c73916000c64c2c76e8da00ca82f8387ab github.com/docker/goamz f0a21f5b2e12f83a505ecf79b633bb2035cf6f85 github.com/docker/libtrust fa567046d9b14f6aa788882a950d69651d230b21 github.com/garyburd/redigo 535138d7bcd717d6531c701ef5933d98b1866257 @@ -18,9 +20,14 @@ github.com/gorilla/handlers 60c7bfde3e33c201519a200a4507a158cc03a17b github.com/gorilla/mux 599cba5e7b6137d46ddf58fb1765f5d928e69604 github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d +github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c github.com/miekg/dns 271c58e0c14f552178ea321a545ff9af38930f39 github.com/mitchellh/mapstructure 482a9fd5fa83e8c4e7817413b80f3eb8feec03ef github.com/ncw/swift b964f2ca856aac39885e258ad25aec08d5f64ee6 +github.com/prometheus/client_golang c332b6f63c0658a65eca15c0e5247ded801cf564 +github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c +github.com/prometheus/common 89604d197083d4781071d3c65855d24ecfb0a563 +github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd github.com/spf13/cobra 312092086bed4968099259622145a0c9ae280064 github.com/spf13/pflag 5644820622454e71517561946e3d94b9f9db6842 github.com/stevvooe/resumable 2aaf90b2ceea5072cb503ef2a620b08ff3119870 @@ -40,4 +47,4 @@ gopkg.in/check.v1 64131543e7896d5bcc6bd5a76287eb75ea96c673 gopkg.in/square/go-jose.v1 40d457b439244b546f023d056628e5184136899b gopkg.in/yaml.v2 bef53efd0c76e49e6de55ead051f886bea7e9420 rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git -github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb +github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb \ No newline at end of file diff --git a/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/beorn7/perks/LICENSE new file mode 100644 index 00000000..339177be --- /dev/null +++ b/vendor/github.com/beorn7/perks/LICENSE @@ -0,0 +1,20 @@ +Copyright (C) 2013 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to 
use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/beorn7/perks/README.md b/vendor/github.com/beorn7/perks/README.md new file mode 100644 index 00000000..fc057777 --- /dev/null +++ b/vendor/github.com/beorn7/perks/README.md @@ -0,0 +1,31 @@ +# Perks for Go (golang.org) + +Perks contains the Go package quantile that computes approximate quantiles over +an unbounded data stream within low memory and CPU bounds. + +For more information and examples, see: +http://godoc.org/github.com/bmizerany/perks + +A very special thank you and shout out to Graham Cormode (Rutgers University), +Flip Korn (AT&T Labs–Research), S. Muthukrishnan (Rutgers University), and +Divesh Srivastava (AT&T Labs–Research) for their research and publication of +[Effective Computation of Biased Quantiles over Data Streams](http://www.cs.rutgers.edu/~muthu/bquant.pdf) + +Thank you, also: +* Armon Dadgar (@armon) +* Andrew Gerrand (@nf) +* Brad Fitzpatrick (@bradfitz) +* Keith Rarick (@kr) + +FAQ: + +Q: Why not move the quantile package into the project root? +A: I want to add more packages to perks later. + +Copyright (C) 2013 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go new file mode 100644 index 00000000..f4cabd66 --- /dev/null +++ b/vendor/github.com/beorn7/perks/quantile/stream.go @@ -0,0 +1,292 @@ +// Package quantile computes approximate quantiles over an unbounded data +// stream within low memory and CPU bounds. +// +// A small amount of accuracy is traded to achieve the above properties. +// +// Multiple streams can be merged before calling Query to generate a single set +// of results. 
This is meaningful when the streams represent the same type of +// data. See Merge and Samples. +// +// For more detailed information about the algorithm used, see: +// +// Effective Computation of Biased Quantiles over Data Streams +// +// http://www.cs.rutgers.edu/~muthu/bquant.pdf +package quantile + +import ( + "math" + "sort" +) + +// Sample holds an observed value and meta information for compression. JSON +// tags have been added for convenience. +type Sample struct { + Value float64 `json:",string"` + Width float64 `json:",string"` + Delta float64 `json:",string"` +} + +// Samples represents a slice of samples. It implements sort.Interface. +type Samples []Sample + +func (a Samples) Len() int { return len(a) } +func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value } +func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +type invariant func(s *stream, r float64) float64 + +// NewLowBiased returns an initialized Stream for low-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the lower ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within (1±Epsilon)*Quantile. +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. +func NewLowBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * r + } + return newStream(ƒ) +} + +// NewHighBiased returns an initialized Stream for high-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the higher ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile). +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. +func NewHighBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * (s.n - r) + } + return newStream(ƒ) +} + +// NewTargeted returns an initialized Stream concerned with a particular set of +// quantile values that are supplied a priori. Knowing these a priori reduces +// space and computation time. The targets map maps the desired quantiles to +// their absolute errors, i.e. the true quantile of a value returned by a query +// is guaranteed to be within (Quantile±Epsilon). +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. +func NewTargeted(targets map[float64]float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + var m = math.MaxFloat64 + var f float64 + for quantile, epsilon := range targets { + if quantile*s.n <= r { + f = (2 * epsilon * r) / quantile + } else { + f = (2 * epsilon * (s.n - r)) / (1 - quantile) + } + if f < m { + m = f + } + } + return m + } + return newStream(ƒ) +} + +// Stream computes quantiles for a stream of float64s. It is not thread-safe by +// design. Take care when using across multiple goroutines. +type Stream struct { + *stream + b Samples + sorted bool +} + +func newStream(ƒ invariant) *Stream { + x := &stream{ƒ: ƒ} + return &Stream{x, make(Samples, 0, 500), true} +} + +// Insert inserts v into the stream. 
+func (s *Stream) Insert(v float64) { + s.insert(Sample{Value: v, Width: 1}) +} + +func (s *Stream) insert(sample Sample) { + s.b = append(s.b, sample) + s.sorted = false + if len(s.b) == cap(s.b) { + s.flush() + } +} + +// Query returns the computed qth percentiles value. If s was created with +// NewTargeted, and q is not in the set of quantiles provided a priori, Query +// will return an unspecified result. +func (s *Stream) Query(q float64) float64 { + if !s.flushed() { + // Fast path when there hasn't been enough data for a flush; + // this also yields better accuracy for small sets of data. + l := len(s.b) + if l == 0 { + return 0 + } + i := int(math.Ceil(float64(l) * q)) + if i > 0 { + i -= 1 + } + s.maybeSort() + return s.b[i].Value + } + s.flush() + return s.stream.query(q) +} + +// Merge merges samples into the underlying streams samples. This is handy when +// merging multiple streams from separate threads, database shards, etc. +// +// ATTENTION: This method is broken and does not yield correct results. The +// underlying algorithm is not capable of merging streams correctly. +func (s *Stream) Merge(samples Samples) { + sort.Sort(samples) + s.stream.merge(samples) +} + +// Reset reinitializes and clears the list reusing the samples buffer memory. +func (s *Stream) Reset() { + s.stream.reset() + s.b = s.b[:0] +} + +// Samples returns stream samples held by s. +func (s *Stream) Samples() Samples { + if !s.flushed() { + return s.b + } + s.flush() + return s.stream.samples() +} + +// Count returns the total number of samples observed in the stream +// since initialization. +func (s *Stream) Count() int { + return len(s.b) + s.stream.count() +} + +func (s *Stream) flush() { + s.maybeSort() + s.stream.merge(s.b) + s.b = s.b[:0] +} + +func (s *Stream) maybeSort() { + if !s.sorted { + s.sorted = true + sort.Sort(s.b) + } +} + +func (s *Stream) flushed() bool { + return len(s.stream.l) > 0 +} + +type stream struct { + n float64 + l []Sample + ƒ invariant +} + +func (s *stream) reset() { + s.l = s.l[:0] + s.n = 0 +} + +func (s *stream) insert(v float64) { + s.merge(Samples{{v, 1, 0}}) +} + +func (s *stream) merge(samples Samples) { + // TODO(beorn7): This tries to merge not only individual samples, but + // whole summaries. The paper doesn't mention merging summaries at + // all. Unittests show that the merging is inaccurate. Find out how to + // do merges properly. + var r float64 + i := 0 + for _, sample := range samples { + for ; i < len(s.l); i++ { + c := s.l[i] + if c.Value > sample.Value { + // Insert at position i. + s.l = append(s.l, Sample{}) + copy(s.l[i+1:], s.l[i:]) + s.l[i] = Sample{ + sample.Value, + sample.Width, + math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1), + // TODO(beorn7): How to calculate delta correctly? 
+ } + i++ + goto inserted + } + r += c.Width + } + s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) + i++ + inserted: + s.n += sample.Width + r += sample.Width + } + s.compress() +} + +func (s *stream) count() int { + return int(s.n) +} + +func (s *stream) query(q float64) float64 { + t := math.Ceil(q * s.n) + t += math.Ceil(s.ƒ(s, t) / 2) + p := s.l[0] + var r float64 + for _, c := range s.l[1:] { + r += p.Width + if r+c.Width+c.Delta > t { + return p.Value + } + p = c + } + return p.Value +} + +func (s *stream) compress() { + if len(s.l) < 2 { + return + } + x := s.l[len(s.l)-1] + xi := len(s.l) - 1 + r := s.n - 1 - x.Width + + for i := len(s.l) - 2; i >= 0; i-- { + c := s.l[i] + if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { + x.Width += c.Width + s.l[xi] = x + // Remove element at i. + copy(s.l[i:], s.l[i+1:]) + s.l = s.l[:len(s.l)-1] + xi -= 1 + } else { + x = c + xi = i + } + r -= c.Width + } +} + +func (s *stream) samples() Samples { + samples := make(Samples, len(s.l)) + copy(samples, s.l) + return samples +} diff --git a/vendor/github.com/docker/go-metrics/LICENSE.code b/vendor/github.com/docker/go-metrics/LICENSE.code new file mode 100644 index 00000000..8f3fee62 --- /dev/null +++ b/vendor/github.com/docker/go-metrics/LICENSE.code @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/go-metrics/LICENSE.docs b/vendor/github.com/docker/go-metrics/LICENSE.docs new file mode 100644 index 00000000..e26cd4fc --- /dev/null +++ b/vendor/github.com/docker/go-metrics/LICENSE.docs @@ -0,0 +1,425 @@ +Attribution-ShareAlike 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. 
+ + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. More_considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution-ShareAlike 4.0 International Public +License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution-ShareAlike 4.0 International Public License ("Public +License"). To the extent this Public License may be interpreted as a +contract, You are granted the Licensed Rights in consideration of Your +acceptance of these terms and conditions, and the Licensor grants You +such rights in consideration of benefits the Licensor receives from +making the Licensed Material available under these terms and +conditions. + + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. BY-SA Compatible License means a license listed at + creativecommons.org/compatiblelicenses, approved by Creative + Commons as essentially the equivalent of this Public License. + + d. 
Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + e. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + f. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + g. License Elements means the license attributes listed in the name + of a Creative Commons Public License. The License Elements of this + Public License are Attribution and ShareAlike. + + h. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + i. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + j. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + k. Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + l. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + m. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part; and + + b. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. 
The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. Additional offer from the Licensor -- Adapted Material. + Every recipient of Adapted Material from You + automatically receives an offer from the Licensor to + exercise the Licensed Rights in the Adapted Material + under the conditions of the Adapter's License You apply. + + c. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. 
You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + b. ShareAlike. + + In addition to the conditions in Section 3(a), if You Share + Adapted Material You produce, the following conditions also apply. + + 1. The Adapter's License You apply must be a Creative Commons + license with the same License Elements, this version or + later, or a BY-SA Compatible License. + + 2. You must include the text of, or the URI or hyperlink to, the + Adapter's License You apply. You may satisfy this condition + in any reasonable manner based on the medium, means, and + context in which You Share Adapted Material. + + 3. You may not offer or impose any additional or different terms + or conditions on, or apply any Effective Technological + Measures to, Adapted Material that restrict exercise of the + rights granted under the Adapter's License You apply. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database; + + b. if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material, + + including for purposes of Section 3(b); and + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. 
The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + + +======================================================================= + +Creative Commons is not a party to its public licenses. +Notwithstanding, Creative Commons may elect to apply one of its public +licenses to material it publishes and in those instances will be +considered the "Licensor." 
Except for the limited purpose of indicating
+that material is shared under a Creative Commons public license or as
+otherwise permitted by the Creative Commons policies published at
+creativecommons.org/policies, Creative Commons does not authorize the
+use of the trademark "Creative Commons" or any other trademark or logo
+of Creative Commons without its prior written consent including,
+without limitation, in connection with any unauthorized modifications
+to any of its public licenses or any other arrangements,
+understandings, or agreements concerning use of licensed material. For
+the avoidance of doubt, this paragraph does not form part of the public
+licenses.
+
+Creative Commons may be contacted at creativecommons.org.
diff --git a/vendor/github.com/docker/go-metrics/NOTICE b/vendor/github.com/docker/go-metrics/NOTICE
new file mode 100644
index 00000000..8915f027
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/NOTICE
@@ -0,0 +1,16 @@
+Docker
+Copyright 2012-2015 Docker, Inc.
+
+This product includes software developed at Docker, Inc. (https://www.docker.com).
+
+The following is courtesy of our legal counsel:
+
+
+Use and transfer of Docker may be subject to certain restrictions by the
+United States and other governments.
+It is your responsibility to ensure that your use and/or transfer does not
+violate applicable laws.
+
+For more information, please see https://www.bis.doc.gov
+
+See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
diff --git a/vendor/github.com/docker/go-metrics/README.md b/vendor/github.com/docker/go-metrics/README.md
new file mode 100644
index 00000000..a9e947cb
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/README.md
@@ -0,0 +1,91 @@
+# go-metrics [![GoDoc](https://godoc.org/github.com/docker/go-metrics?status.svg)](https://godoc.org/github.com/docker/go-metrics) ![Badge Badge](http://doyouevenbadge.com/github.com/docker/go-metrics)
+
+This package is a small wrapper around the prometheus go client to help enforce convention and best practices for metrics collection in Docker projects.
+
+## Best Practices
+
+This package is meant to be used for collecting metrics in Docker projects.
+It is not meant to be used as a replacement for the prometheus client but to help enforce consistent naming across metrics collected.
+If you have not already read the prometheus best practices around naming and labels you can read the page [here](https://prometheus.io/docs/practices/naming/).
+
+The following are a few Docker-specific rules that will help you name and work with metrics in your project.
+
+1. Namespace and Subsystem
+
+This package provides you with a namespace type that allows you to specify the same namespace and subsystem for your metrics.
+
+```go
+ns := metrics.NewNamespace("engine", "daemon", metrics.Labels{
+	"version": dockerversion.Version,
+	"commit":  dockerversion.GitCommit,
+})
+```
+
+In the example above we are creating metrics for the Docker engine's daemon package.
+`engine` would be the namespace in this example where `daemon` is the subsystem or package where we are collecting the metrics.
+
+A namespace also allows you to attach constant labels to the metrics such as the git commit and version that it is collecting.
+
+2. Declaring your Metrics
+
+Try to keep all your metric declarations in one file.
+This makes it easy for others to see what constant labels are defined on the namespace and what labels are defined on the metrics when they are created.
+
+3. Use labels instead of multiple metrics
+
+Labels allow you to define one metric such as the time it takes to perform a certain action on an object.
+If we wanted to collect timings on various container actions such as create, start, and delete then we can define one metric called `container_actions` and use labels to specify the type of action.
+
+```go
+containerActions = ns.NewLabeledTimer("container_actions", "The number of seconds it takes to process each container action", "action")
+```
+
+The last parameter is the label name or key.
+When adding a data point to the metric you will use the `WithValues` function to specify the `action` that you are collecting for.
+
+```go
+containerActions.WithValues("create").UpdateSince(start)
+```
+
+4. Always use a unit
+
+The metric name should describe what you are measuring but you also need to provide the unit that it is being measured with.
+For a timer, the standard unit is seconds and a counter's standard unit is a total.
+For gauges you must provide the unit.
+This package provides a standard set of units for use within the Docker projects.
+
+```go
+Nanoseconds Unit = "nanoseconds"
+Seconds     Unit = "seconds"
+Bytes       Unit = "bytes"
+Total       Unit = "total"
+```
+
+If you need to use a unit but it is not defined in the package please open a PR to add it, but first try to see if one of the already created units will work for your metric, i.e. seconds or nanoseconds vs adding milliseconds.
+
+## Docs
+
+Package documentation can be found [here](https://godoc.org/github.com/docker/go-metrics).
+
+## HTTP Metrics
+
+To instrument an HTTP handler, you can wrap the code like this:
+
+```go
+namespace := metrics.NewNamespace("docker_distribution", "http", metrics.Labels{"handler": "your_http_handler_name"})
+httpMetrics := namespace.NewDefaultHttpMetrics("your_http_handler_name")
+metrics.Register(namespace)
+instrumentedHandler = metrics.InstrumentHandler(httpMetrics, unInstrumentedHandler)
+```
+Note: The `handler` label on each HTTP metric is populated from the handler name passed to `NewDefaultHttpMetrics`.
+
+## Additional Metrics
+
+Additional metrics are also defined here that are not available in the prometheus client.
+If you need a custom metric and it is generic enough to be used by multiple projects, define it here.
+
+## Copyright and license
+
+Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code is released under the Apache 2.0 license. The README.md file, and files in the "docs" folder are licensed under the Creative Commons Attribution-ShareAlike 4.0 International License under the terms and conditions set forth in the file "LICENSE.docs". You may obtain a duplicate copy of the same license, titled CC-BY-SA-4.0, at https://creativecommons.org/licenses/by-sa/4.0/.
diff --git a/vendor/github.com/docker/go-metrics/counter.go b/vendor/github.com/docker/go-metrics/counter.go
new file mode 100644
index 00000000..fe36316a
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/counter.go
@@ -0,0 +1,52 @@
+package metrics
+
+import "github.com/prometheus/client_golang/prometheus"
+
+// Counter is a metric that can only increment its current count
+type Counter interface {
+	// Inc adds Sum(vs) to the counter. Sum(vs) must be positive.
+	//
+	// If len(vs) == 0, increments the counter by 1.
+	Inc(vs ...float64)
+}
+
+// LabeledCounter is a counter that must have labels populated before use.
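+//
+// A usage sketch (ns is a *Namespace; the metric name and label value are
+// illustrative):
+//
+//	pushes := ns.NewLabeledCounter("pushes", "Number of pushes processed", "status")
+//	pushes.WithValues("success").Inc()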
+type LabeledCounter interface {
+	WithValues(vs ...string) Counter
+}
+
+type labeledCounter struct {
+	pc *prometheus.CounterVec
+}
+
+func (lc *labeledCounter) WithValues(vs ...string) Counter {
+	return &counter{pc: lc.pc.WithLabelValues(vs...)}
+}
+
+func (lc *labeledCounter) Describe(ch chan<- *prometheus.Desc) {
+	lc.pc.Describe(ch)
+}
+
+func (lc *labeledCounter) Collect(ch chan<- prometheus.Metric) {
+	lc.pc.Collect(ch)
+}
+
+type counter struct {
+	pc prometheus.Counter
+}
+
+func (c *counter) Inc(vs ...float64) {
+	if len(vs) == 0 {
+		c.pc.Inc()
+		return
+	}
+
+	c.pc.Add(sumFloat64(vs...))
+}
+
+func (c *counter) Describe(ch chan<- *prometheus.Desc) {
+	c.pc.Describe(ch)
+}
+
+func (c *counter) Collect(ch chan<- prometheus.Metric) {
+	c.pc.Collect(ch)
+}
diff --git a/vendor/github.com/docker/go-metrics/docs.go b/vendor/github.com/docker/go-metrics/docs.go
new file mode 100644
index 00000000..8fbdfc69
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/docs.go
@@ -0,0 +1,3 @@
+// Package metrics is a small wrapper around the prometheus go client to help
+// enforce convention and best practices for metrics collection in Docker projects.
+package metrics
diff --git a/vendor/github.com/docker/go-metrics/gauge.go b/vendor/github.com/docker/go-metrics/gauge.go
new file mode 100644
index 00000000..74296e87
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/gauge.go
@@ -0,0 +1,72 @@
+package metrics
+
+import "github.com/prometheus/client_golang/prometheus"
+
+// Gauge is a metric that allows incrementing and decrementing a value
+type Gauge interface {
+	Inc(...float64)
+	Dec(...float64)
+
+	// Add adds the provided value to the gauge's current value
+	Add(float64)
+
+	// Set replaces the gauge's current value with the provided value
+	Set(float64)
+}
+
+// LabeledGauge describes a gauge that must have values populated before use.
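+//
+// A usage sketch (names are illustrative):
+//
+//	inFlight := ns.NewLabeledGauge("in_flight", "In-flight requests", metrics.Total, "handler")
+//	inFlight.WithValues("blob_upload").Inc()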
+type LabeledGauge interface {
+	WithValues(labels ...string) Gauge
+}
+
+type labeledGauge struct {
+	pg *prometheus.GaugeVec
+}
+
+func (lg *labeledGauge) WithValues(labels ...string) Gauge {
+	return &gauge{pg: lg.pg.WithLabelValues(labels...)}
+}
+
+func (lg *labeledGauge) Describe(c chan<- *prometheus.Desc) {
+	lg.pg.Describe(c)
+}
+
+func (lg *labeledGauge) Collect(c chan<- prometheus.Metric) {
+	lg.pg.Collect(c)
+}
+
+type gauge struct {
+	pg prometheus.Gauge
+}
+
+func (g *gauge) Inc(vs ...float64) {
+	if len(vs) == 0 {
+		g.pg.Inc()
+		return
+	}
+
+	g.Add(sumFloat64(vs...))
+}
+
+func (g *gauge) Dec(vs ...float64) {
+	if len(vs) == 0 {
+		g.pg.Dec()
+		return
+	}
+
+	g.Add(-sumFloat64(vs...))
+}
+
+func (g *gauge) Add(v float64) {
+	g.pg.Add(v)
+}
+
+func (g *gauge) Set(v float64) {
+	g.pg.Set(v)
+}
+
+func (g *gauge) Describe(c chan<- *prometheus.Desc) {
+	g.pg.Describe(c)
+}
+
+func (g *gauge) Collect(c chan<- prometheus.Metric) {
+	g.pg.Collect(c)
+}
diff --git a/vendor/github.com/docker/go-metrics/handler.go b/vendor/github.com/docker/go-metrics/handler.go
new file mode 100644
index 00000000..05601e9e
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/handler.go
@@ -0,0 +1,74 @@
+package metrics
+
+import (
+	"net/http"
+
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promhttp"
+)
+
+// HTTPHandlerOpts describes a set of configurable options for HTTP metrics
+type HTTPHandlerOpts struct {
+	DurationBuckets     []float64
+	RequestSizeBuckets  []float64
+	ResponseSizeBuckets []float64
+}
+
+const (
+	InstrumentHandlerResponseSize = iota
+	InstrumentHandlerRequestSize
+	InstrumentHandlerDuration
+	InstrumentHandlerCounter
+	InstrumentHandlerInFlight
+)
+
+type HTTPMetric struct {
+	prometheus.Collector
+	handlerType int
+}
+
+var (
+	defaultDurationBuckets     = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10, 25, 60}
+	defaultRequestSizeBuckets  = prometheus.ExponentialBuckets(1024, 2, 22) // 1K to 2G
+	defaultResponseSizeBuckets = defaultRequestSizeBuckets
+)
+
+// Handler returns the global http.Handler that provides the prometheus
+// metrics format on GET requests. This handler is no longer instrumented.
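+//
+// Typical wiring (sketch):
+//
+//	http.Handle("/metrics", metrics.Handler())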
+func Handler() http.Handler {
+	return promhttp.Handler()
+}
+
+func InstrumentHandler(metrics []*HTTPMetric, handler http.Handler) http.HandlerFunc {
+	return InstrumentHandlerFunc(metrics, handler.ServeHTTP)
+}
+
+func InstrumentHandlerFunc(metrics []*HTTPMetric, handlerFunc http.HandlerFunc) http.HandlerFunc {
+	var handler http.Handler
+	handler = http.HandlerFunc(handlerFunc)
+	for _, metric := range metrics {
+		switch metric.handlerType {
+		case InstrumentHandlerResponseSize:
+			if collector, ok := metric.Collector.(prometheus.ObserverVec); ok {
+				handler = promhttp.InstrumentHandlerResponseSize(collector, handler)
+			}
+		case InstrumentHandlerRequestSize:
+			if collector, ok := metric.Collector.(prometheus.ObserverVec); ok {
+				handler = promhttp.InstrumentHandlerRequestSize(collector, handler)
+			}
+		case InstrumentHandlerDuration:
+			if collector, ok := metric.Collector.(prometheus.ObserverVec); ok {
+				handler = promhttp.InstrumentHandlerDuration(collector, handler)
+			}
+		case InstrumentHandlerCounter:
+			if collector, ok := metric.Collector.(*prometheus.CounterVec); ok {
+				handler = promhttp.InstrumentHandlerCounter(collector, handler)
+			}
+		case InstrumentHandlerInFlight:
+			if collector, ok := metric.Collector.(prometheus.Gauge); ok {
+				handler = promhttp.InstrumentHandlerInFlight(collector, handler)
+			}
+		}
+	}
+	return handler.ServeHTTP
+}
diff --git a/vendor/github.com/docker/go-metrics/helpers.go b/vendor/github.com/docker/go-metrics/helpers.go
new file mode 100644
index 00000000..68b7f51b
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/helpers.go
@@ -0,0 +1,10 @@
+package metrics
+
+// sumFloat64 returns the sum of the provided values.
+func sumFloat64(vs ...float64) float64 {
+	var sum float64
+	for _, v := range vs {
+		sum += v
+	}
+
+	return sum
+}
diff --git a/vendor/github.com/docker/go-metrics/namespace.go b/vendor/github.com/docker/go-metrics/namespace.go
new file mode 100644
index 00000000..79831545
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/namespace.go
@@ -0,0 +1,315 @@
+package metrics
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+type Labels map[string]string
+
+// NewNamespace returns a namespace that is responsible for managing a collection of
+// metrics for a particular namespace and subsystem
+//
+// labels allows const labels to be added to all metrics created in this namespace
+// and are commonly used for data like application version and git commit
+func NewNamespace(name, subsystem string, labels Labels) *Namespace {
+	if labels == nil {
+		labels = make(map[string]string)
+	}
+	return &Namespace{
+		name:      name,
+		subsystem: subsystem,
+		labels:    labels,
+	}
+}
+
+// Namespace describes a set of metrics that share a namespace and subsystem.
+type Namespace struct {
+	name      string
+	subsystem string
+	labels    Labels
+	mu        sync.Mutex
+	metrics   []prometheus.Collector
+}
+
+// WithConstLabels returns a namespace with the provided set of labels merged
+// with the existing constant labels on the namespace.
+//
+// Only metrics created with the returned namespace will get the new constant
+// labels. The returned namespace must be registered separately.
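+//
+// Sketch (the label names are illustrative):
+//
+//	sub := ns.WithConstLabels(metrics.Labels{"component": "registry"})
+//	metrics.Register(sub)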
+func (n *Namespace) WithConstLabels(labels Labels) *Namespace { + n.mu.Lock() + ns := &Namespace{ + name: n.name, + subsystem: n.subsystem, + labels: mergeLabels(n.labels, labels), + } + n.mu.Unlock() + return ns +} + +func (n *Namespace) NewCounter(name, help string) Counter { + c := &counter{pc: prometheus.NewCounter(n.newCounterOpts(name, help))} + n.Add(c) + return c +} + +func (n *Namespace) NewLabeledCounter(name, help string, labels ...string) LabeledCounter { + c := &labeledCounter{pc: prometheus.NewCounterVec(n.newCounterOpts(name, help), labels)} + n.Add(c) + return c +} + +func (n *Namespace) newCounterOpts(name, help string) prometheus.CounterOpts { + return prometheus.CounterOpts{ + Namespace: n.name, + Subsystem: n.subsystem, + Name: makeName(name, Total), + Help: help, + ConstLabels: prometheus.Labels(n.labels), + } +} + +func (n *Namespace) NewTimer(name, help string) Timer { + t := &timer{ + m: prometheus.NewHistogram(n.newTimerOpts(name, help)), + } + n.Add(t) + return t +} + +func (n *Namespace) NewLabeledTimer(name, help string, labels ...string) LabeledTimer { + t := &labeledTimer{ + m: prometheus.NewHistogramVec(n.newTimerOpts(name, help), labels), + } + n.Add(t) + return t +} + +func (n *Namespace) newTimerOpts(name, help string) prometheus.HistogramOpts { + return prometheus.HistogramOpts{ + Namespace: n.name, + Subsystem: n.subsystem, + Name: makeName(name, Seconds), + Help: help, + ConstLabels: prometheus.Labels(n.labels), + } +} + +func (n *Namespace) NewGauge(name, help string, unit Unit) Gauge { + g := &gauge{ + pg: prometheus.NewGauge(n.newGaugeOpts(name, help, unit)), + } + n.Add(g) + return g +} + +func (n *Namespace) NewLabeledGauge(name, help string, unit Unit, labels ...string) LabeledGauge { + g := &labeledGauge{ + pg: prometheus.NewGaugeVec(n.newGaugeOpts(name, help, unit), labels), + } + n.Add(g) + return g +} + +func (n *Namespace) newGaugeOpts(name, help string, unit Unit) prometheus.GaugeOpts { + return prometheus.GaugeOpts{ + Namespace: n.name, + Subsystem: n.subsystem, + Name: makeName(name, unit), + Help: help, + ConstLabels: prometheus.Labels(n.labels), + } +} + +func (n *Namespace) Describe(ch chan<- *prometheus.Desc) { + n.mu.Lock() + defer n.mu.Unlock() + + for _, metric := range n.metrics { + metric.Describe(ch) + } +} + +func (n *Namespace) Collect(ch chan<- prometheus.Metric) { + n.mu.Lock() + defer n.mu.Unlock() + + for _, metric := range n.metrics { + metric.Collect(ch) + } +} + +func (n *Namespace) Add(collector prometheus.Collector) { + n.mu.Lock() + n.metrics = append(n.metrics, collector) + n.mu.Unlock() +} + +func (n *Namespace) NewDesc(name, help string, unit Unit, labels ...string) *prometheus.Desc { + name = makeName(name, unit) + namespace := n.name + if n.subsystem != "" { + namespace = fmt.Sprintf("%s_%s", namespace, n.subsystem) + } + name = fmt.Sprintf("%s_%s", namespace, name) + return prometheus.NewDesc(name, help, labels, prometheus.Labels(n.labels)) +} + +// mergeLabels merges two or more labels objects into a single map, favoring +// the later labels. 
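+//
+// For example, mergeLabels(Labels{"a": "1"}, Labels{"a": "2", "b": "3"})
+// yields Labels{"a": "2", "b": "3"}.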
+func mergeLabels(lbs ...Labels) Labels {
+	merged := make(Labels)
+
+	for _, target := range lbs {
+		for k, v := range target {
+			merged[k] = v
+		}
+	}
+
+	return merged
+}
+
+func makeName(name string, unit Unit) string {
+	if unit == "" {
+		return name
+	}
+
+	return fmt.Sprintf("%s_%s", name, unit)
+}
+
+func (n *Namespace) NewDefaultHttpMetrics(handlerName string) []*HTTPMetric {
+	return n.NewHttpMetricsWithOpts(handlerName, HTTPHandlerOpts{
+		DurationBuckets:     defaultDurationBuckets,
+		RequestSizeBuckets:  defaultRequestSizeBuckets,
+		ResponseSizeBuckets: defaultResponseSizeBuckets,
+	})
+}
+
+func (n *Namespace) NewHttpMetrics(handlerName string, durationBuckets, requestSizeBuckets, responseSizeBuckets []float64) []*HTTPMetric {
+	return n.NewHttpMetricsWithOpts(handlerName, HTTPHandlerOpts{
+		DurationBuckets:     durationBuckets,
+		RequestSizeBuckets:  requestSizeBuckets,
+		ResponseSizeBuckets: responseSizeBuckets,
+	})
+}
+
+func (n *Namespace) NewHttpMetricsWithOpts(handlerName string, opts HTTPHandlerOpts) []*HTTPMetric {
+	var httpMetrics []*HTTPMetric
+	inFlightMetric := n.NewInFlightGaugeMetric(handlerName)
+	requestTotalMetric := n.NewRequestTotalMetric(handlerName)
+	requestDurationMetric := n.NewRequestDurationMetric(handlerName, opts.DurationBuckets)
+	requestSizeMetric := n.NewRequestSizeMetric(handlerName, opts.RequestSizeBuckets)
+	responseSizeMetric := n.NewResponseSizeMetric(handlerName, opts.ResponseSizeBuckets)
+	httpMetrics = append(httpMetrics, inFlightMetric, requestDurationMetric, requestTotalMetric, requestSizeMetric, responseSizeMetric)
+	return httpMetrics
+}
+
+func (n *Namespace) NewInFlightGaugeMetric(handlerName string) *HTTPMetric {
+	labels := prometheus.Labels(n.labels)
+	labels["handler"] = handlerName
+	metric := prometheus.NewGauge(prometheus.GaugeOpts{
+		Namespace:   n.name,
+		Subsystem:   n.subsystem,
+		Name:        "in_flight_requests",
+		Help:        "The in-flight HTTP requests",
+		ConstLabels: prometheus.Labels(labels),
+	})
+	httpMetric := &HTTPMetric{
+		Collector:   metric,
+		handlerType: InstrumentHandlerInFlight,
+	}
+	n.Add(httpMetric)
+	return httpMetric
+}
+
+func (n *Namespace) NewRequestTotalMetric(handlerName string) *HTTPMetric {
+	labels := prometheus.Labels(n.labels)
+	labels["handler"] = handlerName
+	metric := prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Namespace:   n.name,
+			Subsystem:   n.subsystem,
+			Name:        "requests_total",
+			Help:        "Total number of HTTP requests made.",
+			ConstLabels: prometheus.Labels(labels),
+		},
+		[]string{"code", "method"},
+	)
+	httpMetric := &HTTPMetric{
+		Collector:   metric,
+		handlerType: InstrumentHandlerCounter,
+	}
+	n.Add(httpMetric)
+	return httpMetric
+}
+
+func (n *Namespace) NewRequestDurationMetric(handlerName string, buckets []float64) *HTTPMetric {
+	if len(buckets) == 0 {
+		panic("DurationBuckets must be provided")
+	}
+	labels := prometheus.Labels(n.labels)
+	labels["handler"] = handlerName
+	opts := prometheus.HistogramOpts{
+		Namespace:   n.name,
+		Subsystem:   n.subsystem,
+		Name:        "request_duration_seconds",
+		Help:        "The HTTP request latencies in seconds.",
+		Buckets:     buckets,
+		ConstLabels: prometheus.Labels(labels),
+	}
+	metric := prometheus.NewHistogramVec(opts, []string{"method"})
+	httpMetric := &HTTPMetric{
+		Collector:   metric,
+		handlerType: InstrumentHandlerDuration,
+	}
+	n.Add(httpMetric)
+	return httpMetric
+}
+
+func (n *Namespace) NewRequestSizeMetric(handlerName string, buckets []float64) *HTTPMetric {
+	if len(buckets) == 0 {
+		panic("RequestSizeBuckets must be provided")
+	}
+	labels := prometheus.Labels(n.labels)
+	labels["handler"] = handlerName
+	opts := prometheus.HistogramOpts{
+		Namespace:   n.name,
+		Subsystem:   n.subsystem,
+		Name:        "request_size_bytes",
+		Help:        "The HTTP request sizes in bytes.",
+		Buckets:     buckets,
+		ConstLabels: prometheus.Labels(labels),
+	}
+	metric := prometheus.NewHistogramVec(opts, []string{})
+	httpMetric := &HTTPMetric{
+		Collector:   metric,
+		handlerType: InstrumentHandlerRequestSize,
+	}
+	n.Add(httpMetric)
+	return httpMetric
+}
+
+func (n *Namespace) NewResponseSizeMetric(handlerName string, buckets []float64) *HTTPMetric {
+	if len(buckets) == 0 {
+		panic("ResponseSizeBuckets must be provided")
+	}
+	labels := prometheus.Labels(n.labels)
+	labels["handler"] = handlerName
+	opts := prometheus.HistogramOpts{
+		Namespace:   n.name,
+		Subsystem:   n.subsystem,
+		Name:        "response_size_bytes",
+		Help:        "The HTTP response sizes in bytes.",
+		Buckets:     buckets,
+		ConstLabels: prometheus.Labels(labels),
+	}
+	metric := prometheus.NewHistogramVec(opts, []string{})
+	httpMetric := &HTTPMetric{
+		Collector:   metric,
+		handlerType: InstrumentHandlerResponseSize,
+	}
+	n.Add(httpMetric)
+	return httpMetric
+}
diff --git a/vendor/github.com/docker/go-metrics/register.go b/vendor/github.com/docker/go-metrics/register.go
new file mode 100644
index 00000000..708358df
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/register.go
@@ -0,0 +1,15 @@
+package metrics
+
+import "github.com/prometheus/client_golang/prometheus"
+
+// Register adds all the metrics in the provided namespace to the global
+// metrics registry
+func Register(n *Namespace) {
+	prometheus.MustRegister(n)
+}
+
+// Deregister removes all the metrics in the provided namespace from the
+// global metrics registry
+func Deregister(n *Namespace) {
+	prometheus.Unregister(n)
+}
diff --git a/vendor/github.com/docker/go-metrics/timer.go b/vendor/github.com/docker/go-metrics/timer.go
new file mode 100644
index 00000000..824c9873
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/timer.go
@@ -0,0 +1,85 @@
+package metrics
+
+import (
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+// StartTimer begins a timer observation at the callsite. When the target
+// operation is completed, the caller should call the returned done func().
+func StartTimer(timer Timer) (done func()) {
+	start := time.Now()
+	return func() {
+		timer.Update(time.Since(start))
+	}
+}
+
+// Timer is a metric that allows collecting the duration of an action in seconds
+type Timer interface {
+	// Update records an observation, duration, and converts to the target
+	// units.
+	Update(duration time.Duration)
+
+	// UpdateSince will add the duration from the provided starting time to the
+	// timer's summary with the precision that was used in creation of the timer
+	UpdateSince(time.Time)
+}
+
+// LabeledTimer is a timer that must have label values populated before use.
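+//
+// A usage sketch (the metric name and label value are illustrative):
+//
+//	actions := ns.NewLabeledTimer("container_actions", "Seconds taken per container action", "action")
+//	defer actions.WithValues("create").UpdateSince(time.Now())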
+type LabeledTimer interface { + WithValues(labels ...string) *labeledTimerObserver +} + +type labeledTimer struct { + m *prometheus.HistogramVec +} + +type labeledTimerObserver struct { + m prometheus.Observer +} + +func (lbo *labeledTimerObserver) Update(duration time.Duration) { + lbo.m.Observe(duration.Seconds()) +} + +func (lbo *labeledTimerObserver) UpdateSince(since time.Time) { + lbo.m.Observe(time.Since(since).Seconds()) +} + +func (lt *labeledTimer) WithValues(labels ...string) *labeledTimerObserver { + return &labeledTimerObserver{m: lt.m.WithLabelValues(labels...)} +} + +func (lt *labeledTimer) Describe(c chan<- *prometheus.Desc) { + lt.m.Describe(c) +} + +func (lt *labeledTimer) Collect(c chan<- prometheus.Metric) { + lt.m.Collect(c) +} + +type timer struct { + m prometheus.Observer +} + +func (t *timer) Update(duration time.Duration) { + t.m.Observe(duration.Seconds()) +} + +func (t *timer) UpdateSince(since time.Time) { + t.m.Observe(time.Since(since).Seconds()) +} + +func (t *timer) Describe(c chan<- *prometheus.Desc) { + c <- t.m.(prometheus.Metric).Desc() +} + +func (t *timer) Collect(c chan<- prometheus.Metric) { + // Are there any observers that don't implement Collector? It is really + // unclear what the point of the upstream change was, but we'll let this + // panic if we get an observer that doesn't implement collector. In this + // case, we should almost always see metricVec objects, so this should + // never panic. + t.m.(prometheus.Collector).Collect(c) +} diff --git a/vendor/github.com/docker/go-metrics/unit.go b/vendor/github.com/docker/go-metrics/unit.go new file mode 100644 index 00000000..c96622f9 --- /dev/null +++ b/vendor/github.com/docker/go-metrics/unit.go @@ -0,0 +1,12 @@ +package metrics + +// Unit represents the type or precision of a metric that is appended to +// the metrics fully qualified name +type Unit string + +const ( + Nanoseconds Unit = "nanoseconds" + Seconds Unit = "seconds" + Bytes Unit = "bytes" + Total Unit = "total" +) diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE b/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE new file mode 100644 index 00000000..8dada3ed --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE b/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE new file mode 100644 index 00000000..5d8cb5b7 --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE @@ -0,0 +1 @@ +Copyright 2012 Matt T. Proud (matt.proud@gmail.com) diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/README.md b/vendor/github.com/matttproud/golang_protobuf_extensions/README.md new file mode 100644 index 00000000..751ee696 --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/README.md @@ -0,0 +1,20 @@ +# Overview +This repository provides various Protocol Buffer extensions for the Go +language (golang), namely support for record length-delimited message +streaming. 
+ +| Java | Go | +| ------------------------------ | --------------------- | +| MessageLite#parseDelimitedFrom | pbutil.ReadDelimited | +| MessageLite#writeDelimitedTo | pbutil.WriteDelimited | + +Because [Code Review 9102043](https://codereview.appspot.com/9102043/) is +destined to never be merged into mainline (i.e., never be promoted to formal +[goprotobuf features](https://github.com/golang/protobuf)), this repository +will live here in the wild. + +# Documentation +We have [generated Go Doc documentation](http://godoc.org/github.com/matttproud/golang_protobuf_extensions/pbutil) here. + +# Testing +[![Build Status](https://travis-ci.org/matttproud/golang_protobuf_extensions.png?branch=master)](https://travis-ci.org/matttproud/golang_protobuf_extensions) diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go new file mode 100644 index 00000000..258c0636 --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go @@ -0,0 +1,75 @@ +// Copyright 2013 Matt T. Proud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pbutil + +import ( + "encoding/binary" + "errors" + "io" + + "github.com/golang/protobuf/proto" +) + +var errInvalidVarint = errors.New("invalid varint32 encountered") + +// ReadDelimited decodes a message from the provided length-delimited stream, +// where the length is encoded as 32-bit varint prefix to the message body. +// It returns the total number of bytes read and any applicable error. This is +// roughly equivalent to the companion Java API's +// MessageLite#parseDelimitedFrom. As per the reader contract, this function +// calls r.Read repeatedly as required until exactly one message including its +// prefix is read and decoded (or an error has occurred). The function never +// reads more bytes from the stream than required. The function never returns +// an error if a message has been read and decoded correctly, even if the end +// of the stream has been reached in doing so. In that case, any subsequent +// calls return (0, io.EOF). +func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) { + // Per AbstractParser#parsePartialDelimitedFrom with + // CodedInputStream#readRawVarint32. + var headerBuf [binary.MaxVarintLen32]byte + var bytesRead, varIntBytes int + var messageLength uint64 + for varIntBytes == 0 { // i.e. no varint has been decoded yet. + if bytesRead >= len(headerBuf) { + return bytesRead, errInvalidVarint + } + // We have to read byte by byte here to avoid reading more bytes + // than required. Each read byte is appended to what we have + // read before. + newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1]) + if newBytesRead == 0 { + if err != nil { + return bytesRead, err + } + // A Reader should not return (0, nil), but if it does, + // it should be treated as no-op (according to the + // Reader contract). So let's go on... 
+			continue
+		}
+		bytesRead += newBytesRead
+		// Now present everything read so far to the varint decoder and
+		// see if a varint can be decoded already.
+		messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead])
+	}
+
+	messageBuf := make([]byte, messageLength)
+	newBytesRead, err := io.ReadFull(r, messageBuf)
+	bytesRead += newBytesRead
+	if err != nil {
+		return bytesRead, err
+	}
+
+	return bytesRead, proto.Unmarshal(messageBuf, m)
+}
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
new file mode 100644
index 00000000..c318385c
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package pbutil provides record length-delimited Protocol Buffer streaming.
+package pbutil
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
new file mode 100644
index 00000000..8fb59ad2
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
@@ -0,0 +1,46 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pbutil
+
+import (
+	"encoding/binary"
+	"io"
+
+	"github.com/golang/protobuf/proto"
+)
+
+// WriteDelimited encodes and dumps a message to the provided writer prefixed
+// with a 32-bit varint indicating the length of the encoded message, producing
+// a length-delimited record stream, which can be used to chain encoded
+// messages of the same type together in a file. It returns the total
+// number of bytes written and any applicable error. This is roughly
+// equivalent to the companion Java API's MessageLite#writeDelimitedTo.
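+//
+// Round-trip sketch (buf is a bytes.Buffer, msg a proto.Message):
+//
+//	n, err := WriteDelimited(&buf, msg) // write one record
+//	_, err = ReadDelimited(&buf, msg)   // read it back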
+func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) { + buffer, err := proto.Marshal(m) + if err != nil { + return 0, err + } + + var buf [binary.MaxVarintLen32]byte + encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer))) + + sync, err := w.Write(buf[:encodedLength]) + if err != nil { + return sync, err + } + + n, err = w.Write(buffer) + return n + sync, err +} diff --git a/vendor/github.com/miekg/dns/msg_generate.go b/vendor/github.com/miekg/dns/msg_generate.go deleted file mode 100644 index 35786f22..00000000 --- a/vendor/github.com/miekg/dns/msg_generate.go +++ /dev/null @@ -1,340 +0,0 @@ -//+build ignore - -// msg_generate.go is meant to run with go generate. It will use -// go/{importer,types} to track down all the RR struct types. Then for each type -// it will generate pack/unpack methods based on the struct tags. The generated source is -// written to zmsg.go, and is meant to be checked into git. -package main - -import ( - "bytes" - "fmt" - "go/format" - "go/importer" - "go/types" - "log" - "os" - "strings" -) - -var packageHdr = ` -// *** DO NOT MODIFY *** -// AUTOGENERATED BY go generate from msg_generate.go - -package dns - -` - -// getTypeStruct will take a type and the package scope, and return the -// (innermost) struct if the type is considered a RR type (currently defined as -// those structs beginning with a RR_Header, could be redefined as implementing -// the RR interface). The bool return value indicates if embedded structs were -// resolved. -func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) { - st, ok := t.Underlying().(*types.Struct) - if !ok { - return nil, false - } - if st.Field(0).Type() == scope.Lookup("RR_Header").Type() { - return st, false - } - if st.Field(0).Anonymous() { - st, _ := getTypeStruct(st.Field(0).Type(), scope) - return st, true - } - return nil, false -} - -func main() { - // Import and type-check the package - pkg, err := importer.Default().Import("github.com/miekg/dns") - fatalIfErr(err) - scope := pkg.Scope() - - // Collect actual types (*X) - var namedTypes []string - for _, name := range scope.Names() { - o := scope.Lookup(name) - if o == nil || !o.Exported() { - continue - } - if st, _ := getTypeStruct(o.Type(), scope); st == nil { - continue - } - if name == "PrivateRR" { - continue - } - - // Check if corresponding TypeX exists - if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" { - log.Fatalf("Constant Type%s does not exist.", o.Name()) - } - - namedTypes = append(namedTypes, o.Name()) - } - - b := &bytes.Buffer{} - b.WriteString(packageHdr) - - fmt.Fprint(b, "// pack*() functions\n\n") - for _, name := range namedTypes { - o := scope.Lookup(name) - st, _ := getTypeStruct(o.Type(), scope) - - fmt.Fprintf(b, "func (rr *%s) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {\n", name) - fmt.Fprint(b, `off, err := rr.Hdr.pack(msg, off, compression, compress) -if err != nil { - return off, err -} -headerEnd := off -`) - for i := 1; i < st.NumFields(); i++ { - o := func(s string) { - fmt.Fprintf(b, s, st.Field(i).Name()) - fmt.Fprint(b, `if err != nil { -return off, err -} -`) - } - - if _, ok := st.Field(i).Type().(*types.Slice); ok { - switch st.Tag(i) { - case `dns:"-"`: // ignored - case `dns:"txt"`: - o("off, err = packStringTxt(rr.%s, msg, off)\n") - case `dns:"opt"`: - o("off, err = packDataOpt(rr.%s, msg, off)\n") - case `dns:"nsec"`: - o("off, err = packDataNsec(rr.%s, msg, off)\n") - case `dns:"domain-name"`: - o("off, err = 
packDataDomainNames(rr.%s, msg, off, compression, compress)\n") - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - continue - } - - switch { - case st.Tag(i) == `dns:"-"`: // ignored - case st.Tag(i) == `dns:"cdomain-name"`: - fallthrough - case st.Tag(i) == `dns:"domain-name"`: - o("off, err = PackDomainName(rr.%s, msg, off, compression, compress)\n") - case st.Tag(i) == `dns:"a"`: - o("off, err = packDataA(rr.%s, msg, off)\n") - case st.Tag(i) == `dns:"aaaa"`: - o("off, err = packDataAAAA(rr.%s, msg, off)\n") - case st.Tag(i) == `dns:"uint48"`: - o("off, err = packUint48(rr.%s, msg, off)\n") - case st.Tag(i) == `dns:"txt"`: - o("off, err = packString(rr.%s, msg, off)\n") - - case strings.HasPrefix(st.Tag(i), `dns:"size-base32`): // size-base32 can be packed just like base32 - fallthrough - case st.Tag(i) == `dns:"base32"`: - o("off, err = packStringBase32(rr.%s, msg, off)\n") - - case strings.HasPrefix(st.Tag(i), `dns:"size-base64`): // size-base64 can be packed just like base64 - fallthrough - case st.Tag(i) == `dns:"base64"`: - o("off, err = packStringBase64(rr.%s, msg, off)\n") - - case strings.HasPrefix(st.Tag(i), `dns:"size-hex:SaltLength`): // Hack to fix empty salt length for NSEC3 - o("if rr.%s == \"-\" { /* do nothing, empty salt */ }\n") - continue - case strings.HasPrefix(st.Tag(i), `dns:"size-hex`): // size-hex can be packed just like hex - fallthrough - case st.Tag(i) == `dns:"hex"`: - o("off, err = packStringHex(rr.%s, msg, off)\n") - - case st.Tag(i) == `dns:"octet"`: - o("off, err = packStringOctet(rr.%s, msg, off)\n") - case st.Tag(i) == "": - switch st.Field(i).Type().(*types.Basic).Kind() { - case types.Uint8: - o("off, err = packUint8(rr.%s, msg, off)\n") - case types.Uint16: - o("off, err = packUint16(rr.%s, msg, off)\n") - case types.Uint32: - o("off, err = packUint32(rr.%s, msg, off)\n") - case types.Uint64: - o("off, err = packUint64(rr.%s, msg, off)\n") - case types.String: - o("off, err = packString(rr.%s, msg, off)\n") - default: - log.Fatalln(name, st.Field(i).Name()) - } - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - } - // We have packed everything, only now we know the rdlength of this RR - fmt.Fprintln(b, "rr.Header().Rdlength = uint16(off-headerEnd)") - fmt.Fprintln(b, "return off, nil }\n") - } - - fmt.Fprint(b, "// unpack*() functions\n\n") - for _, name := range namedTypes { - o := scope.Lookup(name) - st, _ := getTypeStruct(o.Type(), scope) - - fmt.Fprintf(b, "func unpack%s(h RR_Header, msg []byte, off int) (RR, int, error) {\n", name) - fmt.Fprintf(b, "rr := new(%s)\n", name) - fmt.Fprint(b, "rr.Hdr = h\n") - fmt.Fprint(b, `if noRdata(h) { -return rr, off, nil - } -var err error -rdStart := off -_ = rdStart - -`) - for i := 1; i < st.NumFields(); i++ { - o := func(s string) { - fmt.Fprintf(b, s, st.Field(i).Name()) - fmt.Fprint(b, `if err != nil { -return rr, off, err -} -`) - } - - // size-* are special, because they reference a struct member we should use for the length. 
- if strings.HasPrefix(st.Tag(i), `dns:"size-`) { - structMember := structMember(st.Tag(i)) - structTag := structTag(st.Tag(i)) - switch structTag { - case "hex": - fmt.Fprintf(b, "rr.%s, off, err = unpackStringHex(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) - case "base32": - fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase32(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) - case "base64": - fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase64(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - fmt.Fprint(b, `if err != nil { -return rr, off, err -} -`) - continue - } - - if _, ok := st.Field(i).Type().(*types.Slice); ok { - switch st.Tag(i) { - case `dns:"-"`: // ignored - case `dns:"txt"`: - o("rr.%s, off, err = unpackStringTxt(msg, off)\n") - case `dns:"opt"`: - o("rr.%s, off, err = unpackDataOpt(msg, off)\n") - case `dns:"nsec"`: - o("rr.%s, off, err = unpackDataNsec(msg, off)\n") - case `dns:"domain-name"`: - o("rr.%s, off, err = unpackDataDomainNames(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - continue - } - - switch st.Tag(i) { - case `dns:"-"`: // ignored - case `dns:"cdomain-name"`: - fallthrough - case `dns:"domain-name"`: - o("rr.%s, off, err = UnpackDomainName(msg, off)\n") - case `dns:"a"`: - o("rr.%s, off, err = unpackDataA(msg, off)\n") - case `dns:"aaaa"`: - o("rr.%s, off, err = unpackDataAAAA(msg, off)\n") - case `dns:"uint48"`: - o("rr.%s, off, err = unpackUint48(msg, off)\n") - case `dns:"txt"`: - o("rr.%s, off, err = unpackString(msg, off)\n") - case `dns:"base32"`: - o("rr.%s, off, err = unpackStringBase32(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") - case `dns:"base64"`: - o("rr.%s, off, err = unpackStringBase64(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") - case `dns:"hex"`: - o("rr.%s, off, err = unpackStringHex(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") - case `dns:"octet"`: - o("rr.%s, off, err = unpackStringOctet(msg, off)\n") - case "": - switch st.Field(i).Type().(*types.Basic).Kind() { - case types.Uint8: - o("rr.%s, off, err = unpackUint8(msg, off)\n") - case types.Uint16: - o("rr.%s, off, err = unpackUint16(msg, off)\n") - case types.Uint32: - o("rr.%s, off, err = unpackUint32(msg, off)\n") - case types.Uint64: - o("rr.%s, off, err = unpackUint64(msg, off)\n") - case types.String: - o("rr.%s, off, err = unpackString(msg, off)\n") - default: - log.Fatalln(name, st.Field(i).Name()) - } - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - // If we've hit len(msg) we return without error. - if i < st.NumFields()-1 { - fmt.Fprintf(b, `if off == len(msg) { -return rr, off, nil - } -`) - } - } - fmt.Fprintf(b, "return rr, off, err }\n\n") - } - // Generate typeToUnpack map - fmt.Fprintln(b, "var typeToUnpack = map[uint16]func(RR_Header, []byte, int) (RR, int, error){") - for _, name := range namedTypes { - if name == "RFC3597" { - continue - } - fmt.Fprintf(b, "Type%s: unpack%s,\n", name, name) - } - fmt.Fprintln(b, "}\n") - - // gofmt - res, err := format.Source(b.Bytes()) - if err != nil { - b.WriteTo(os.Stderr) - log.Fatal(err) - } - - // write result - f, err := os.Create("zmsg.go") - fatalIfErr(err) - defer f.Close() - f.Write(res) -} - -// structMember will take a tag like dns:"size-base32:SaltLength" and return the last part of this string. 
-func structMember(s string) string { - fields := strings.Split(s, ":") - if len(fields) == 0 { - return "" - } - f := fields[len(fields)-1] - // f should have a closing " - if len(f) > 1 { - return f[:len(f)-1] - } - return f -} - -// structTag will take a tag like dns:"size-base32:SaltLength" and return base32. -func structTag(s string) string { - fields := strings.Split(s, ":") - if len(fields) < 2 { - return "" - } - return fields[1][len("\"size-"):] -} - -func fatalIfErr(err error) { - if err != nil { - log.Fatal(err) - } -} diff --git a/vendor/github.com/miekg/dns/types_generate.go b/vendor/github.com/miekg/dns/types_generate.go deleted file mode 100644 index bf80da32..00000000 --- a/vendor/github.com/miekg/dns/types_generate.go +++ /dev/null @@ -1,271 +0,0 @@ -//+build ignore - -// types_generate.go is meant to run with go generate. It will use -// go/{importer,types} to track down all the RR struct types. Then for each type -// it will generate conversion tables (TypeToRR and TypeToString) and banal -// methods (len, Header, copy) based on the struct tags. The generated source is -// written to ztypes.go, and is meant to be checked into git. -package main - -import ( - "bytes" - "fmt" - "go/format" - "go/importer" - "go/types" - "log" - "os" - "strings" - "text/template" -) - -var skipLen = map[string]struct{}{ - "NSEC": {}, - "NSEC3": {}, - "OPT": {}, -} - -var packageHdr = ` -// *** DO NOT MODIFY *** -// AUTOGENERATED BY go generate from type_generate.go - -package dns - -import ( - "encoding/base64" - "net" -) - -` - -var TypeToRR = template.Must(template.New("TypeToRR").Parse(` -// TypeToRR is a map of constructors for each RR type. -var TypeToRR = map[uint16]func() RR{ -{{range .}}{{if ne . "RFC3597"}} Type{{.}}: func() RR { return new({{.}}) }, -{{end}}{{end}} } - -`)) - -var typeToString = template.Must(template.New("typeToString").Parse(` -// TypeToString is a map of strings for each RR type. -var TypeToString = map[uint16]string{ -{{range .}}{{if ne . "NSAPPTR"}} Type{{.}}: "{{.}}", -{{end}}{{end}} TypeNSAPPTR: "NSAP-PTR", -} - -`)) - -var headerFunc = template.Must(template.New("headerFunc").Parse(` -// Header() functions -{{range .}} func (rr *{{.}}) Header() *RR_Header { return &rr.Hdr } -{{end}} - -`)) - -// getTypeStruct will take a type and the package scope, and return the -// (innermost) struct if the type is considered a RR type (currently defined as -// those structs beginning with a RR_Header, could be redefined as implementing -// the RR interface). The bool return value indicates if embedded structs were -// resolved. 
-func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) { - st, ok := t.Underlying().(*types.Struct) - if !ok { - return nil, false - } - if st.Field(0).Type() == scope.Lookup("RR_Header").Type() { - return st, false - } - if st.Field(0).Anonymous() { - st, _ := getTypeStruct(st.Field(0).Type(), scope) - return st, true - } - return nil, false -} - -func main() { - // Import and type-check the package - pkg, err := importer.Default().Import("github.com/miekg/dns") - fatalIfErr(err) - scope := pkg.Scope() - - // Collect constants like TypeX - var numberedTypes []string - for _, name := range scope.Names() { - o := scope.Lookup(name) - if o == nil || !o.Exported() { - continue - } - b, ok := o.Type().(*types.Basic) - if !ok || b.Kind() != types.Uint16 { - continue - } - if !strings.HasPrefix(o.Name(), "Type") { - continue - } - name := strings.TrimPrefix(o.Name(), "Type") - if name == "PrivateRR" { - continue - } - numberedTypes = append(numberedTypes, name) - } - - // Collect actual types (*X) - var namedTypes []string - for _, name := range scope.Names() { - o := scope.Lookup(name) - if o == nil || !o.Exported() { - continue - } - if st, _ := getTypeStruct(o.Type(), scope); st == nil { - continue - } - if name == "PrivateRR" { - continue - } - - // Check if corresponding TypeX exists - if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" { - log.Fatalf("Constant Type%s does not exist.", o.Name()) - } - - namedTypes = append(namedTypes, o.Name()) - } - - b := &bytes.Buffer{} - b.WriteString(packageHdr) - - // Generate TypeToRR - fatalIfErr(TypeToRR.Execute(b, namedTypes)) - - // Generate typeToString - fatalIfErr(typeToString.Execute(b, numberedTypes)) - - // Generate headerFunc - fatalIfErr(headerFunc.Execute(b, namedTypes)) - - // Generate len() - fmt.Fprint(b, "// len() functions\n") - for _, name := range namedTypes { - if _, ok := skipLen[name]; ok { - continue - } - o := scope.Lookup(name) - st, isEmbedded := getTypeStruct(o.Type(), scope) - if isEmbedded { - continue - } - fmt.Fprintf(b, "func (rr *%s) len() int {\n", name) - fmt.Fprintf(b, "l := rr.Hdr.len()\n") - for i := 1; i < st.NumFields(); i++ { - o := func(s string) { fmt.Fprintf(b, s, st.Field(i).Name()) } - - if _, ok := st.Field(i).Type().(*types.Slice); ok { - switch st.Tag(i) { - case `dns:"-"`: - // ignored - case `dns:"cdomain-name"`, `dns:"domain-name"`, `dns:"txt"`: - o("for _, x := range rr.%s { l += len(x) + 1 }\n") - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - continue - } - - switch { - case st.Tag(i) == `dns:"-"`: - // ignored - case st.Tag(i) == `dns:"cdomain-name"`, st.Tag(i) == `dns:"domain-name"`: - o("l += len(rr.%s) + 1\n") - case st.Tag(i) == `dns:"octet"`: - o("l += len(rr.%s)\n") - case strings.HasPrefix(st.Tag(i), `dns:"size-base64`): - fallthrough - case st.Tag(i) == `dns:"base64"`: - o("l += base64.StdEncoding.DecodedLen(len(rr.%s))\n") - case strings.HasPrefix(st.Tag(i), `dns:"size-hex`): - fallthrough - case st.Tag(i) == `dns:"hex"`: - o("l += len(rr.%s)/2 + 1\n") - case st.Tag(i) == `dns:"a"`: - o("l += net.IPv4len // %s\n") - case st.Tag(i) == `dns:"aaaa"`: - o("l += net.IPv6len // %s\n") - case st.Tag(i) == `dns:"txt"`: - o("for _, t := range rr.%s { l += len(t) + 1 }\n") - case st.Tag(i) == `dns:"uint48"`: - o("l += 6 // %s\n") - case st.Tag(i) == "": - switch st.Field(i).Type().(*types.Basic).Kind() { - case types.Uint8: - o("l += 1 // %s\n") - case types.Uint16: - o("l += 2 // %s\n") - case types.Uint32: - o("l += 4 // %s\n") - case 
types.Uint64: - o("l += 8 // %s\n") - case types.String: - o("l += len(rr.%s) + 1\n") - default: - log.Fatalln(name, st.Field(i).Name()) - } - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - } - fmt.Fprintf(b, "return l }\n") - } - - // Generate copy() - fmt.Fprint(b, "// copy() functions\n") - for _, name := range namedTypes { - o := scope.Lookup(name) - st, isEmbedded := getTypeStruct(o.Type(), scope) - if isEmbedded { - continue - } - fmt.Fprintf(b, "func (rr *%s) copy() RR {\n", name) - fields := []string{"*rr.Hdr.copyHeader()"} - for i := 1; i < st.NumFields(); i++ { - f := st.Field(i).Name() - if sl, ok := st.Field(i).Type().(*types.Slice); ok { - t := sl.Underlying().String() - t = strings.TrimPrefix(t, "[]") - if strings.Contains(t, ".") { - splits := strings.Split(t, ".") - t = splits[len(splits)-1] - } - fmt.Fprintf(b, "%s := make([]%s, len(rr.%s)); copy(%s, rr.%s)\n", - f, t, f, f, f) - fields = append(fields, f) - continue - } - if st.Field(i).Type().String() == "net.IP" { - fields = append(fields, "copyIP(rr."+f+")") - continue - } - fields = append(fields, "rr."+f) - } - fmt.Fprintf(b, "return &%s{%s}\n", name, strings.Join(fields, ",")) - fmt.Fprintf(b, "}\n") - } - - // gofmt - res, err := format.Source(b.Bytes()) - if err != nil { - b.WriteTo(os.Stderr) - log.Fatal(err) - } - - // write result - f, err := os.Create("ztypes.go") - fatalIfErr(err) - defer f.Close() - f.Write(res) -} - -func fatalIfErr(err error) { - if err != nil { - log.Fatal(err) - } -} diff --git a/vendor/github.com/prometheus/client_golang/LICENSE b/vendor/github.com/prometheus/client_golang/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE new file mode 100644 index 00000000..dd878a30 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/NOTICE @@ -0,0 +1,23 @@ +Prometheus instrumentation library for Go applications +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). + + +The following components are included in this product: + +perks - a fork of https://github.com/bmizerany/perks +https://github.com/beorn7/perks +Copyright 2013-2015 Blake Mizerany, Björn Rabenstein +See https://github.com/beorn7/perks/blob/master/README.md for license details. + +Go support for Protocol Buffers - Google's data interchange format +http://github.com/golang/protobuf/ +Copyright 2010 The Go Authors +See source code for license details. + +Support for streaming Protocol Buffer messages for the Go language (golang). 
+https://github.com/matttproud/golang_protobuf_extensions
+Copyright 2013 Matt T. Proud
+Licensed under the Apache License, Version 2.0
diff --git a/vendor/github.com/prometheus/client_golang/README.md b/vendor/github.com/prometheus/client_golang/README.md
new file mode 100644
index 00000000..0d0156d3
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/README.md
@@ -0,0 +1,93 @@
+# Prometheus Go client library
+
+[![Build Status](https://travis-ci.org/prometheus/client_golang.svg?branch=master)](https://travis-ci.org/prometheus/client_golang)
+[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/client_golang)](https://goreportcard.com/report/github.com/prometheus/client_golang)
+
+This is the [Go](http://golang.org) client library for
+[Prometheus](http://prometheus.io). It has two separate parts, one for
+instrumenting application code, and one for creating clients that talk to the
+Prometheus HTTP API.
+
+__This library requires Go1.7 or later.__
+
+## Important note about releases, versioning, tagging, stability, and your favorite Go dependency management tool
+
+While our goal is to follow [Semantic Versioning](https://semver.org/), this
+repository is still pre-1.0.0. To quote the
+[Semantic Versioning spec](https://semver.org/#spec-item-4): “Anything may
+change at any time. The public API should not be considered stable.” We know
+that this is at odds with the widespread use of this library. However, just
+declaring something 1.0.0 doesn't make it 1.0.0. Instead, we are working
+towards a 1.0.0 release that actually deserves its major version number.
+
+Having said that, we aim to always keep the tip of master in a workable
+state and to only introduce “mildly” breaking changes up to and including
+[v0.9.0](https://github.com/prometheus/client_golang/milestone/1). After that,
+a number of “hard” breaking changes are planned, see the
+[v0.10.0 milestone](https://github.com/prometheus/client_golang/milestone/2),
+which should get the library much closer to 1.0.0 state.
+
+Dependency management in Go projects is still in flux, and there are many tools
+floating around. While [dep](https://golang.github.io/dep/) might develop into
+the de-facto standard tool, it is still officially experimental. The roadmap
+for this library has been laid out with a lot of sometimes painful experience
+in mind. We really cannot adjust it every other month to the needs of the
+currently most popular or most promising Go dependency management tool. The
+recommended course of action with dependency management tools is the following:
+
+- Do not expect strict post-1.0.0 semver semantics prior to the 1.0.0
+  release. If your dependency management tool expects strict post-1.0.0 semver
+  semantics, you have to wait. Sorry.
+- If you want absolute certainty, please lock to a specific commit. You can
+  also lock to tags, but please don't ask for more tagging. This would suggest
+  some release or stability testing procedure that simply is not in place. As
+  said above, we are aiming for stability of the tip of master, but if we
+  tagged every single commit, locking to tags would be the same as locking to
+  commits.
+- If you want to get the newer features and improvements and are willing to
+  take the minor risk of newly introduced bugs and “mild” breakage, just always
+  update to the tip of master (which is essentially the original idea of Go
+  dependency management). We recommend not using features marked as
+  _deprecated_ in this case.
+- Once [v0.9.0](https://github.com/prometheus/client_golang/milestone/1) is
+  out, you could lock to v0.9.x to get bugfixes (and perhaps minor new
+  features) while avoiding the “hard” breakage that will come with post-0.9
+  features.
+
+## Instrumenting applications
+
+[![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/prometheus)](http://gocover.io/github.com/prometheus/client_golang/prometheus) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus)
+
+The
+[`prometheus` directory](https://github.com/prometheus/client_golang/tree/master/prometheus)
+contains the instrumentation library. See the
+[best practices section](http://prometheus.io/docs/practices/naming/) of the
+Prometheus documentation to learn more about instrumenting applications.
+
+The
+[`examples` directory](https://github.com/prometheus/client_golang/tree/master/examples)
+contains simple examples of instrumented code.
+
+## Client for the Prometheus HTTP API
+
+[![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/api/prometheus/v1)](http://gocover.io/github.com/prometheus/client_golang/api/prometheus/v1) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/api/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/api)
+
+The
+[`api/prometheus` directory](https://github.com/prometheus/client_golang/tree/master/api/prometheus)
+contains the client for the
+[Prometheus HTTP API](http://prometheus.io/docs/querying/api/). It allows you
+to write Go applications that query time series data from a Prometheus
+server. It is still in alpha stage.
+
+## Where are `model`, `extraction`, and `text`?
+
+The `model` package has been moved to
+[`prometheus/common/model`](https://github.com/prometheus/common/tree/master/model).
+
+The `extraction` and `text` packages are now contained in
+[`prometheus/common/expfmt`](https://github.com/prometheus/common/tree/master/expfmt).
+
+## Contributing and community
+
+See the [contributing guidelines](CONTRIBUTING.md) and the
+[Community section](http://prometheus.io/community/) of the homepage.
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/README.md b/vendor/github.com/prometheus/client_golang/prometheus/README.md
new file mode 100644
index 00000000..44986bff
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/README.md
@@ -0,0 +1 @@
+See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus).
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
new file mode 100644
index 00000000..623d3d83
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
@@ -0,0 +1,75 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Collector is the interface implemented by anything that can be used by +// Prometheus to collect metrics. A Collector has to be registered for +// collection. See Registerer.Register. +// +// The stock metrics provided by this package (Gauge, Counter, Summary, +// Histogram, Untyped) are also Collectors (which only ever collect one metric, +// namely itself). An implementer of Collector may, however, collect multiple +// metrics in a coordinated fashion and/or create metrics on the fly. Examples +// for collectors already implemented in this library are the metric vectors +// (i.e. collection of multiple instances of the same Metric but with different +// label values) like GaugeVec or SummaryVec, and the ExpvarCollector. +type Collector interface { + // Describe sends the super-set of all possible descriptors of metrics + // collected by this Collector to the provided channel and returns once + // the last descriptor has been sent. The sent descriptors fulfill the + // consistency and uniqueness requirements described in the Desc + // documentation. (It is valid if one and the same Collector sends + // duplicate descriptors. Those duplicates are simply ignored. However, + // two different Collectors must not send duplicate descriptors.) This + // method idempotently sends the same descriptors throughout the + // lifetime of the Collector. If a Collector encounters an error while + // executing this method, it must send an invalid descriptor (created + // with NewInvalidDesc) to signal the error to the registry. + Describe(chan<- *Desc) + // Collect is called by the Prometheus registry when collecting + // metrics. The implementation sends each collected metric via the + // provided channel and returns once the last metric has been sent. The + // descriptor of each sent metric is one of those returned by + // Describe. Returned metrics that share the same descriptor must differ + // in their variable label values. This method may be called + // concurrently and must therefore be implemented in a concurrency safe + // way. Blocking occurs at the expense of total performance of rendering + // all registered metrics. Ideally, Collector implementations support + // concurrent readers. + Collect(chan<- Metric) +} + +// selfCollector implements Collector for a single Metric so that the Metric +// collects itself. Add it as an anonymous field to a struct that implements +// Metric, and call init with the Metric itself as an argument. +type selfCollector struct { + self Metric +} + +// init provides the selfCollector with a reference to the metric it is supposed +// to collect. It is usually called within the factory function to create a +// metric. See example. +func (c *selfCollector) init(self Metric) { + c.self = self +} + +// Describe implements Collector. +func (c *selfCollector) Describe(ch chan<- *Desc) { + ch <- c.self.Desc() +} + +// Collect implements Collector. 
+func (c *selfCollector) Collect(ch chan<- Metric) { + ch <- c.self +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go new file mode 100644 index 00000000..765e4550 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go @@ -0,0 +1,277 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" + "math" + "sync/atomic" + + dto "github.com/prometheus/client_model/go" +) + +// Counter is a Metric that represents a single numerical value that only ever +// goes up. That implies that it cannot be used to count items whose number can +// also go down, e.g. the number of currently running goroutines. Those +// "counters" are represented by Gauges. +// +// A Counter is typically used to count requests served, tasks completed, errors +// occurred, etc. +// +// To create Counter instances, use NewCounter. +type Counter interface { + Metric + Collector + + // Inc increments the counter by 1. Use Add to increment it by arbitrary + // non-negative values. + Inc() + // Add adds the given value to the counter. It panics if the value is < + // 0. + Add(float64) +} + +// CounterOpts is an alias for Opts. See there for doc comments. +type CounterOpts Opts + +// NewCounter creates a new Counter based on the provided CounterOpts. +// +// The returned implementation tracks the counter value in two separate +// variables, a float64 and a uint64. The latter is used to track calls of the +// Inc method and calls of the Add method with a value that can be represented +// as a uint64. This allows atomic increments of the counter with optimal +// performance. (It is common to have an Inc call in very hot execution paths.) +// Both internal tracking values are added up in the Write method. This has to +// be taken into account when it comes to precision and overflow behavior. +func NewCounter(opts CounterOpts) Counter { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ) + result := &counter{desc: desc, labelPairs: desc.constLabelPairs} + result.init(result) // Init self-collection. + return result +} + +type counter struct { + // valBits contains the bits of the represented float64 value, while + // valInt stores values that are exact integers. Both have to go first + // in the struct to guarantee alignment for atomic operations. 
+ // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + valBits uint64 + valInt uint64 + + selfCollector + desc *Desc + + labelPairs []*dto.LabelPair +} + +func (c *counter) Desc() *Desc { + return c.desc +} + +func (c *counter) Add(v float64) { + if v < 0 { + panic(errors.New("counter cannot decrease in value")) + } + ival := uint64(v) + if float64(ival) == v { + atomic.AddUint64(&c.valInt, ival) + return + } + + for { + oldBits := atomic.LoadUint64(&c.valBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) { + return + } + } +} + +func (c *counter) Inc() { + atomic.AddUint64(&c.valInt, 1) +} + +func (c *counter) Write(out *dto.Metric) error { + fval := math.Float64frombits(atomic.LoadUint64(&c.valBits)) + ival := atomic.LoadUint64(&c.valInt) + val := fval + float64(ival) + + return populateMetric(CounterValue, val, c.labelPairs, out) +} + +// CounterVec is a Collector that bundles a set of Counters that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. number of HTTP requests, partitioned by response code and +// method). Create instances with NewCounterVec. +type CounterVec struct { + *metricVec +} + +// NewCounterVec creates a new CounterVec based on the provided CounterOpts and +// partitioned by the given label names. +func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &CounterVec{ + metricVec: newMetricVec(desc, func(lvs ...string) Metric { + if len(lvs) != len(desc.variableLabels) { + panic(errInconsistentCardinality) + } + result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs)} + result.init(result) // Init self-collection. + return result + }), + } +} + +// GetMetricWithLabelValues returns the Counter for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Counter is created. +// +// It is possible to call this method without using the returned Counter to only +// create the new Counter but leave it at its starting value 0. See also the +// SummaryVec example. +// +// Keeping the Counter for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Counter from the CounterVec. In that case, +// the Counter will still exist, but it will not be exported anymore, even if a +// Counter with the same label values is created later. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. +func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { + metric, err := v.metricVec.getMetricWithLabelValues(lvs...) 
+	if metric != nil {
+		return metric.(Counter), err
+	}
+	return nil, err
+}
+
+// GetMetricWith returns the Counter for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Counter is created. Implications of
+// creating a Counter without using it and keeping the Counter for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
+	metric, err := v.metricVec.getMetricWith(labels)
+	if metric != nil {
+		return metric.(Counter), err
+	}
+	return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
+//     myVec.WithLabelValues("404", "GET").Add(42)
+func (v *CounterVec) WithLabelValues(lvs ...string) Counter {
+	c, err := v.GetMetricWithLabelValues(lvs...)
+	if err != nil {
+		panic(err)
+	}
+	return c
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. Not returning an error allows shortcuts like
+//     myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
+func (v *CounterVec) With(labels Labels) Counter {
+	c, err := v.GetMetricWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return c
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the CounterVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) {
+	vec, err := v.curryWith(labels)
+	if vec != nil {
+		return &CounterVec{vec}, err
+	}
+	return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec {
+	vec, err := v.CurryWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return vec
+}
+
+// CounterFunc is a Counter whose value is determined at collect time by calling a
+// provided function.
+//
+// To create CounterFunc instances, use NewCounterFunc.
+type CounterFunc interface {
+	Metric
+	Collector
+}
+
+// NewCounterFunc creates a new CounterFunc based on the provided
+// CounterOpts. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently.
If that results in concurrent calls to Write, like in
+// the case where a CounterFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe. The function should also honor
+// the contract for a Counter (values only go up, not down), but compliance will
+// not be checked.
+func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {
+	return newValueFunc(NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		nil,
+		opts.ConstLabels,
+	), CounterValue, function)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
new file mode 100644
index 00000000..4a755b0f
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
@@ -0,0 +1,188 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"errors"
+	"fmt"
+	"sort"
+	"strings"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/prometheus/common/model"
+
+	dto "github.com/prometheus/client_model/go"
+)
+
+// Desc is the descriptor used by every Prometheus Metric. It is essentially
+// the immutable meta-data of a Metric. The normal Metric implementations
+// included in this package manage their Desc under the hood. Users only have to
+// deal with Desc if they use advanced features like the ExpvarCollector or
+// custom Collectors and Metrics.
+//
+// Descriptors registered with the same registry have to fulfill certain
+// consistency and uniqueness criteria if they share the same fully-qualified
+// name: They must have the same help string and the same label names (aka label
+// dimensions) in each of constLabels and variableLabels, but they must differ in
+// the values of the constLabels.
+//
+// Descriptors that share the same fully-qualified names and the same label
+// values of their constLabels are considered equal.
+//
+// Use NewDesc to create new Desc instances.
+type Desc struct {
+	// fqName has been built from Namespace, Subsystem, and Name.
+	fqName string
+	// help provides some helpful information about this metric.
+	help string
+	// constLabelPairs contains precalculated DTO label pairs based on
+	// the constant labels.
+	constLabelPairs []*dto.LabelPair
+	// VariableLabels contains names of labels for which the metric
+	// maintains variable values.
+	variableLabels []string
+	// id is a hash of the values of the ConstLabels and fqName. This
+	// must be unique among all registered descriptors and can therefore be
+	// used as an identifier of the descriptor.
+	id uint64
+	// dimHash is a hash of the label names (preset and variable) and the
+	// Help string. Each Desc with the same fqName must have the same
+	// dimHash.
+	dimHash uint64
+	// err is an error that occurred during construction. It is reported at
+	// registration time.
+	err error
+}
+
+// NewDesc allocates and initializes a new Desc.
Errors are recorded in the Desc
+// and will be reported at registration time. variableLabels and constLabels can
+// be nil if no such labels should be set. fqName and help must not be empty.
+//
+// variableLabels only contain the label names. Their label values are variable
+// and therefore not part of the Desc. (They are managed within the Metric.)
+//
+// For constLabels, the label values are constant. Therefore, they are fully
+// specified in the Desc. See the Collector example for a usage pattern.
+func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
+	d := &Desc{
+		fqName:         fqName,
+		help:           help,
+		variableLabels: variableLabels,
+	}
+	if help == "" {
+		d.err = errors.New("empty help string")
+		return d
+	}
+	if !model.IsValidMetricName(model.LabelValue(fqName)) {
+		d.err = fmt.Errorf("%q is not a valid metric name", fqName)
+		return d
+	}
+	// labelValues contains the label values of const labels (in order of
+	// their sorted label names) plus the fqName (at position 0).
+	labelValues := make([]string, 1, len(constLabels)+1)
+	labelValues[0] = fqName
+	labelNames := make([]string, 0, len(constLabels)+len(variableLabels))
+	labelNameSet := map[string]struct{}{}
+	// First add only the const label names and sort them...
+	for labelName := range constLabels {
+		if !checkLabelName(labelName) {
+			d.err = fmt.Errorf("%q is not a valid label name", labelName)
+			return d
+		}
+		labelNames = append(labelNames, labelName)
+		labelNameSet[labelName] = struct{}{}
+	}
+	sort.Strings(labelNames)
+	// ... so that we can now add const label values in the order of their names.
+	for _, labelName := range labelNames {
+		labelValues = append(labelValues, constLabels[labelName])
+	}
+	// Validate the const label values. They can't have a wrong cardinality, so
+	// use len(labelValues) as expectedNumberOfValues.
+	if err := validateLabelValues(labelValues, len(labelValues)); err != nil {
+		d.err = err
+		return d
+	}
+	// Now add the variable label names, but prefix them with something that
+	// cannot be in a regular label name. That prevents matching the label
+	// dimension with a different mix between preset and variable labels.
+	for _, labelName := range variableLabels {
+		if !checkLabelName(labelName) {
+			d.err = fmt.Errorf("%q is not a valid label name", labelName)
+			return d
+		}
+		labelNames = append(labelNames, "$"+labelName)
+		labelNameSet[labelName] = struct{}{}
+	}
+	if len(labelNames) != len(labelNameSet) {
+		d.err = errors.New("duplicate label names")
+		return d
+	}
+
+	vh := hashNew()
+	for _, val := range labelValues {
+		vh = hashAdd(vh, val)
+		vh = hashAddByte(vh, separatorByte)
+	}
+	d.id = vh
+	// Sort labelNames so that order doesn't matter for the hash.
+	sort.Strings(labelNames)
+	// Now hash together (in this order) the help string and the sorted
+	// label names.
+	lh := hashNew()
+	lh = hashAdd(lh, help)
+	lh = hashAddByte(lh, separatorByte)
+	for _, labelName := range labelNames {
+		lh = hashAdd(lh, labelName)
+		lh = hashAddByte(lh, separatorByte)
+	}
+	d.dimHash = lh
+
+	d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
+	for n, v := range constLabels {
+		d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{
+			Name:  proto.String(n),
+			Value: proto.String(v),
+		})
+	}
+	sort.Sort(LabelPairSorter(d.constLabelPairs))
+	return d
+}
+
+// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the
+// provided error set.
If a collector returning such a descriptor is registered,
+// registration will fail with the provided error. NewInvalidDesc can be used by
+// a Collector to signal inability to describe itself.
+func NewInvalidDesc(err error) *Desc {
+	return &Desc{
+		err: err,
+	}
+}
+
+func (d *Desc) String() string {
+	lpStrings := make([]string, 0, len(d.constLabelPairs))
+	for _, lp := range d.constLabelPairs {
+		lpStrings = append(
+			lpStrings,
+			fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
+		)
+	}
+	return fmt.Sprintf(
+		"Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}",
+		d.fqName,
+		d.help,
+		strings.Join(lpStrings, ","),
+		d.variableLabels,
+	)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
new file mode 100644
index 00000000..36ef1556
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
@@ -0,0 +1,186 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package prometheus provides metrics primitives to instrument code for
+// monitoring. It also offers a registry for metrics. Sub-packages allow
+// exposing the registered metrics via HTTP (package promhttp) or pushing
+// them to a Pushgateway (package push).
+//
+// All exported functions and methods are safe to be used concurrently unless
+// specified otherwise.
+//
+// A Basic Example
+//
+// As a starting point, a very basic usage example:
+//
+//    package main
+//
+//    import (
+//    	"log"
+//    	"net/http"
+//
+//    	"github.com/prometheus/client_golang/prometheus"
+//    	"github.com/prometheus/client_golang/prometheus/promhttp"
+//    )
+//
+//    var (
+//    	cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{
+//    		Name: "cpu_temperature_celsius",
+//    		Help: "Current temperature of the CPU.",
+//    	})
+//    	hdFailures = prometheus.NewCounterVec(
+//    		prometheus.CounterOpts{
+//    			Name: "hd_errors_total",
+//    			Help: "Number of hard-disk errors.",
+//    		},
+//    		[]string{"device"},
+//    	)
+//    )
+//
+//    func init() {
+//    	// Metrics have to be registered to be exposed:
+//    	prometheus.MustRegister(cpuTemp)
+//    	prometheus.MustRegister(hdFailures)
+//    }
+//
+//    func main() {
+//    	cpuTemp.Set(65.3)
+//    	hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
+//
+//    	// The Handler function provides a default handler to expose metrics
+//    	// via an HTTP server. "/metrics" is the usual endpoint for that.
+//    	http.Handle("/metrics", promhttp.Handler())
+//    	log.Fatal(http.ListenAndServe(":8080", nil))
+//    }
+//
+//
+// This is a complete program that exports two metrics, a Gauge and a Counter,
+// the latter with a label attached to turn it into a (one-dimensional) vector.
+//
+// Metrics
+//
+// The number of exported identifiers in this package might appear a bit
+// overwhelming. However, in addition to the basic plumbing shown in the example
+// above, you only need to understand the different metric types and their
+// vector versions for basic usage.
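To make the vector versions mentioned here concrete, a minimal sketch of a CounterVec partitioned by one label; the metric name and label are illustrative, and the snippet assumes the `prometheus` import from the basic example above:

```go
httpReqs := prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "http_requests_total",
		Help: "Total HTTP requests, partitioned by status code.",
	},
	[]string{"code"},
)
prometheus.MustRegister(httpReqs)

// Each distinct label value gets its own Counter inside the vector.
httpReqs.WithLabelValues("200").Inc()
httpReqs.With(prometheus.Labels{"code": "500"}).Inc()
```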
+//
+// Above, you have already touched the Counter and the Gauge. There are two more
+// advanced metric types: the Summary and Histogram. A more thorough description
+// of those four metric types can be found in the Prometheus docs:
+// https://prometheus.io/docs/concepts/metric_types/
+//
+// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the
+// Prometheus server not to assume anything about its type.
+//
+// In addition to the fundamental metric types Gauge, Counter, Summary,
+// Histogram, and Untyped, a very important part of the Prometheus data model is
+// the partitioning of samples along dimensions called labels, which results in
+// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
+// HistogramVec, and UntypedVec.
+//
+// While only the fundamental metric types implement the Metric interface, both
+// the metrics and their vector versions implement the Collector interface. A
+// Collector manages the collection of a number of Metrics, but for convenience,
+// a Metric can also “collect itself”. Note that Gauge, Counter, Summary,
+// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec,
+// SummaryVec, HistogramVec, and UntypedVec are not.
+//
+// To create instances of Metrics and their vector versions, you need a suitable
+// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, HistogramOpts, or
+// UntypedOpts.
+//
+// Custom Collectors and constant Metrics
+//
+// While you could create your own implementations of Metric, most likely you
+// will only ever implement the Collector interface on your own. At first
+// glance, a custom Collector seems handy to bundle Metrics for common
+// registration (with the prime example of the different metric vectors above,
+// which bundle all the metrics of the same name but with different labels).
+//
+// There is a more involved use case, too: If you already have metrics
+// available, created outside of the Prometheus context, you don't need the
+// interface of the various Metric types. You essentially want to mirror the
+// existing numbers into Prometheus Metrics during collection. A custom
+// implementation of the Collector interface is perfect for that. You can create
+// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
+// NewConstSummary (and their respective Must… versions). That will happen in
+// the Collect method. The Describe method has to return separate Desc
+// instances, representative of the “throw-away” metrics to be created later.
+// NewDesc comes in handy to create those Desc instances.
+//
+// The Collector example illustrates the use case. You can also look at the
+// source code of the processCollector (mirroring process metrics), the
+// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar
+// metrics) as examples that are used in this package itself.
+//
+// If you just need to call a function to get a single float value to collect as
+// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting
+// shortcuts.
+//
+// Advanced Uses of the Registry
+//
+// While MustRegister is by far the most common way of registering a Collector,
+// sometimes you might want to handle the errors the registration might cause.
+// As suggested by the name, MustRegister panics if an error occurs. With the
+// Register function, the error is returned and can be handled.
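A minimal sketch of such explicit handling, assuming the imports from the basic example above and that this vendored version exposes AlreadyRegisteredError as current releases do:

```go
inFlight := prometheus.NewGauge(prometheus.GaugeOpts{
	Name: "in_flight_requests",
	Help: "Current number of in-flight requests.",
})
if err := prometheus.Register(inFlight); err != nil {
	if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
		// A Collector with the same Desc was registered before; reuse it.
		inFlight = are.ExistingCollector.(prometheus.Gauge)
	} else {
		log.Fatal(err)
	}
}
```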
+//
+// An error is returned if the registered Collector is incompatible or
+// inconsistent with already registered metrics. The registry aims for
+// consistency of the collected metrics according to the Prometheus data model.
+// Inconsistencies are ideally detected at registration time, not at collect
+// time. The former will usually be detected at start-up time of a program,
+// while the latter will only happen at scrape time, possibly not even on the
+// first scrape if the inconsistency only becomes relevant later. That is the
+// main reason why a Collector and a Metric have to describe themselves to the
+// registry.
+//
+// So far, everything we did operated on the so-called default registry, as it
+// can be found in the global DefaultRegisterer variable. With NewRegistry, you
+// can create a custom registry, or you can even implement the Registerer or
+// Gatherer interfaces yourself. The methods Register and Unregister work in the
+// same way on a custom registry as the global functions Register and Unregister
+// on the default registry.
+//
+// There are a number of uses for custom registries: You can use registries with
+// special properties, see NewPedanticRegistry. You can avoid global state, as
+// it is imposed by the DefaultRegisterer. You can use multiple registries at
+// the same time to expose different metrics in different ways. You can use
+// separate registries for testing purposes.
+//
+// Also note that the DefaultRegisterer comes registered with a Collector for Go
+// runtime metrics (via NewGoCollector) and a Collector for process metrics (via
+// NewProcessCollector). With a custom registry, you are in control and decide
+// yourself about the Collectors to register.
+//
+// HTTP Exposition
+//
+// The Registry implements the Gatherer interface. The caller of the Gather
+// method can then expose the gathered metrics in some way. Usually, the metrics
+// are served via HTTP on the /metrics endpoint. That's happening in the example
+// above. The tools to expose metrics via HTTP are in the promhttp sub-package.
+// (The top-level functions in the prometheus package are deprecated.)
+//
+// Pushing to the Pushgateway
+//
+// Functions for pushing to the Pushgateway can be found in the push sub-package.
+//
+// Graphite Bridge
+//
+// Functions and examples to push metrics from a Gatherer to Graphite can be
+// found in the graphite sub-package.
+//
+// Other Means of Exposition
+//
+// More ways of exposing metrics can easily be added by following the approaches
+// of the existing implementations.
+package prometheus
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
new file mode 100644
index 00000000..18a99d5f
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
@@ -0,0 +1,119 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"encoding/json"
+	"expvar"
+)
+
+type expvarCollector struct {
+	exports map[string]*Desc
+}
+
+// NewExpvarCollector returns a newly allocated expvar Collector that still has
+// to be registered with a Prometheus registry.
+//
+// An expvar Collector collects metrics from the expvar interface. It provides a
+// quick way to expose numeric values that are already exported via expvar as
+// Prometheus metrics. Note that the data models of expvar and Prometheus are
+// fundamentally different, and that the expvar Collector is inherently slower
+// than native Prometheus metrics. Thus, the expvar Collector is probably great
+// for experiments and prototyping, but you should seriously consider a more
+// direct implementation of Prometheus metrics for monitoring production
+// systems.
+//
+// The exports map has the following meaning:
+//
+// The keys in the map correspond to expvar keys, i.e. for every expvar key you
+// want to export as Prometheus metric, you need an entry in the exports
+// map. The descriptor mapped to each key describes how to export the expvar
+// value. It defines the name and the help string of the Prometheus metric
+// proxying the expvar value. The type will always be Untyped.
+//
+// For descriptors without variable labels, the expvar value must be a number or
+// a bool. The number is then directly exported as the Prometheus sample
+// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values
+// that are not numbers or bools are silently ignored.
+//
+// If the descriptor has one variable label, the expvar value must be an expvar
+// map. The keys in the expvar map become the various values of the one
+// Prometheus label. The values in the expvar map must be numbers or bools again
+// as above.
+//
+// For descriptors with more than one variable label, the expvar value must be a
+// nested expvar map, i.e. where the values of the topmost map are maps again
+// etc. until a depth is reached that corresponds to the number of labels. The
+// leaves of that structure must be numbers or bools as above to serve as the
+// sample values.
+//
+// Anything that does not fit into the scheme above is silently ignored.
+func NewExpvarCollector(exports map[string]*Desc) Collector {
+	return &expvarCollector{
+		exports: exports,
+	}
+}
+
+// Describe implements Collector.
+func (e *expvarCollector) Describe(ch chan<- *Desc) {
+	for _, desc := range e.exports {
+		ch <- desc
+	}
+}
+
+// Collect implements Collector.
+func (e *expvarCollector) Collect(ch chan<- Metric) {
+	for name, desc := range e.exports {
+		var m Metric
+		expVar := expvar.Get(name)
+		if expVar == nil {
+			continue
+		}
+		var v interface{}
+		labels := make([]string, len(desc.variableLabels))
+		if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
+			ch <- NewInvalidMetric(desc, err)
+			continue
+		}
+		var processValue func(v interface{}, i int)
+		processValue = func(v interface{}, i int) {
+			if i >= len(labels) {
+				copiedLabels := append(make([]string, 0, len(labels)), labels...)
+				switch v := v.(type) {
+				case float64:
+					m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...)
+				case bool:
+					if v {
+						m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...)
+					} else {
+						m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...)
+					}
+				default:
+					return
+				}
+				ch <- m
+				return
+			}
+			vm, ok := v.(map[string]interface{})
+			if !ok {
+				return
+			}
+			for lv, val := range vm {
+				labels[i] = lv
+				processValue(val, i+1)
+			}
+		}
+		processValue(v, 0)
+	}
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
new file mode 100644
index 00000000..e3b67df8
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
@@ -0,0 +1,29 @@
+package prometheus
+
+// Inline and byte-free variant of hash/fnv's fnv64a.
+
+const (
+	offset64 = 14695981039346656037
+	prime64  = 1099511628211
+)
+
+// hashNew initializes a new fnv64a hash value.
+func hashNew() uint64 {
+	return offset64
+}
+
+// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
+func hashAdd(h uint64, s string) uint64 {
+	for i := 0; i < len(s); i++ {
+		h ^= uint64(s[i])
+		h *= prime64
+	}
+	return h
+}
+
+// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
+func hashAddByte(h uint64, b byte) uint64 {
+	h ^= uint64(b)
+	h *= prime64
+	return h
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
new file mode 100644
index 00000000..17c72d7e
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
@@ -0,0 +1,286 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"math"
+	"sync/atomic"
+	"time"
+
+	dto "github.com/prometheus/client_model/go"
+)
+
+// Gauge is a Metric that represents a single numerical value that can
+// arbitrarily go up and down.
+//
+// A Gauge is typically used for measured values like temperatures or current
+// memory usage, but also "counts" that can go up and down, like the number of
+// running goroutines.
+//
+// To create Gauge instances, use NewGauge.
+type Gauge interface {
+	Metric
+	Collector
+
+	// Set sets the Gauge to an arbitrary value.
+	Set(float64)
+	// Inc increments the Gauge by 1. Use Add to increment it by arbitrary
+	// values.
+	Inc()
+	// Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary
+	// values.
+	Dec()
+	// Add adds the given value to the Gauge. (The value can be negative,
+	// resulting in a decrease of the Gauge.)
+	Add(float64)
+	// Sub subtracts the given value from the Gauge. (The value can be
+	// negative, resulting in an increase of the Gauge.)
+	Sub(float64)
+
+	// SetToCurrentTime sets the Gauge to the current Unix time in seconds.
+	SetToCurrentTime()
+}
+
+// GaugeOpts is an alias for Opts. See there for doc comments.
+type GaugeOpts Opts
+
+// NewGauge creates a new Gauge based on the provided GaugeOpts.
+//
+// The returned implementation is optimized for a fast Set method. If you have a
+// choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick
+// the former.
For example, the Inc method of the returned Gauge is slower than +// the Inc method of a Counter returned by NewCounter. This matches the typical +// scenarios for Gauges and Counters, where the former tends to be Set-heavy and +// the latter Inc-heavy. +func NewGauge(opts GaugeOpts) Gauge { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ) + result := &gauge{desc: desc, labelPairs: desc.constLabelPairs} + result.init(result) // Init self-collection. + return result +} + +type gauge struct { + // valBits contains the bits of the represented float64 value. It has + // to go first in the struct to guarantee alignment for atomic + // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG + valBits uint64 + + selfCollector + + desc *Desc + labelPairs []*dto.LabelPair +} + +func (g *gauge) Desc() *Desc { + return g.desc +} + +func (g *gauge) Set(val float64) { + atomic.StoreUint64(&g.valBits, math.Float64bits(val)) +} + +func (g *gauge) SetToCurrentTime() { + g.Set(float64(time.Now().UnixNano()) / 1e9) +} + +func (g *gauge) Inc() { + g.Add(1) +} + +func (g *gauge) Dec() { + g.Add(-1) +} + +func (g *gauge) Add(val float64) { + for { + oldBits := atomic.LoadUint64(&g.valBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + val) + if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) { + return + } + } +} + +func (g *gauge) Sub(val float64) { + g.Add(val * -1) +} + +func (g *gauge) Write(out *dto.Metric) error { + val := math.Float64frombits(atomic.LoadUint64(&g.valBits)) + return populateMetric(GaugeValue, val, g.labelPairs, out) +} + +// GaugeVec is a Collector that bundles a set of Gauges that all share the same +// Desc, but have different values for their variable labels. This is used if +// you want to count the same thing partitioned by various dimensions +// (e.g. number of operations queued, partitioned by user and operation +// type). Create instances with NewGaugeVec. +type GaugeVec struct { + *metricVec +} + +// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and +// partitioned by the given label names. +func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &GaugeVec{ + metricVec: newMetricVec(desc, func(lvs ...string) Metric { + if len(lvs) != len(desc.variableLabels) { + panic(errInconsistentCardinality) + } + result := &gauge{desc: desc, labelPairs: makeLabelPairs(desc, lvs)} + result.init(result) // Init self-collection. + return result + }), + } +} + +// GetMetricWithLabelValues returns the Gauge for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Gauge is created. +// +// It is possible to call this method without using the returned Gauge to only +// create the new Gauge but leave it at its starting value 0. See also the +// SummaryVec example. +// +// Keeping the Gauge for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Gauge from the GaugeVec. In that case, the +// Gauge will still exist, but it will not be exported anymore, even if a +// Gauge with the same label values is created later. See also the CounterVec +// example. 
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
+	metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
+	if metric != nil {
+		return metric.(Gauge), err
+	}
+	return nil, err
+}
+
+// GetMetricWith returns the Gauge for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Gauge is created. Implications of
+// creating a Gauge without using it and keeping the Gauge for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
+	metric, err := v.metricVec.getMetricWith(labels)
+	if metric != nil {
+		return metric.(Gauge), err
+	}
+	return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Add(42)
+func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge {
+	g, err := v.GetMetricWithLabelValues(lvs...)
+	if err != nil {
+		panic(err)
+	}
+	return g
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. Not returning an error allows shortcuts like
+// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
+func (v *GaugeVec) With(labels Labels) Gauge {
+	g, err := v.GetMetricWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return g
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the GaugeVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) {
+	vec, err := v.curryWith(labels)
+	if vec != nil {
+		return &GaugeVec{vec}, err
+	}
+	return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec +} + +// GaugeFunc is a Gauge whose value is determined at collect time by calling a +// provided function. +// +// To create GaugeFunc instances, use NewGaugeFunc. +type GaugeFunc interface { + Metric + Collector +} + +// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The +// value reported is determined by calling the given function from within the +// Write method. Take into account that metric collection may happen +// concurrently. If that results in concurrent calls to Write, like in the case +// where a GaugeFunc is directly registered with Prometheus, the provided +// function must be concurrency-safe. +func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), GaugeValue, function) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go new file mode 100644 index 00000000..096454af --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -0,0 +1,284 @@ +package prometheus + +import ( + "fmt" + "runtime" + "runtime/debug" + "time" +) + +type goCollector struct { + goroutinesDesc *Desc + threadsDesc *Desc + gcDesc *Desc + goInfoDesc *Desc + + // metrics to describe and collect + metrics memStatsMetrics +} + +// NewGoCollector returns a collector which exports metrics about the current +// go process. +func NewGoCollector() Collector { + return &goCollector{ + goroutinesDesc: NewDesc( + "go_goroutines", + "Number of goroutines that currently exist.", + nil, nil), + threadsDesc: NewDesc( + "go_threads", + "Number of OS threads created.", + nil, nil), + gcDesc: NewDesc( + "go_gc_duration_seconds", + "A summary of the GC invocation durations.", + nil, nil), + goInfoDesc: NewDesc( + "go_info", + "Information about the Go environment.", + nil, Labels{"version": runtime.Version()}), + metrics: memStatsMetrics{ + { + desc: NewDesc( + memstatNamespace("alloc_bytes"), + "Number of bytes allocated and still in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("alloc_bytes_total"), + "Total number of bytes allocated, even if freed.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("sys_bytes"), + "Number of bytes obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("lookups_total"), + "Total number of pointer lookups.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("mallocs_total"), + "Total number of mallocs.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("frees_total"), + "Total number of frees.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("heap_alloc_bytes"), + 
"Number of heap bytes allocated and still in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_sys_bytes"), + "Number of heap bytes obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_idle_bytes"), + "Number of heap bytes waiting to be used.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_inuse_bytes"), + "Number of heap bytes that are in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_released_bytes"), + "Number of heap bytes released to OS.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_objects"), + "Number of allocated objects.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("stack_inuse_bytes"), + "Number of bytes in use by the stack allocator.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("stack_sys_bytes"), + "Number of bytes obtained from system for stack allocator.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mspan_inuse_bytes"), + "Number of bytes in use by mspan structures.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mspan_sys_bytes"), + "Number of bytes used for mspan structures obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mcache_inuse_bytes"), + "Number of bytes in use by mcache structures.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mcache_sys_bytes"), + "Number of bytes used for mcache structures obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("buck_hash_sys_bytes"), + "Number of bytes used by the profiling bucket hash table.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("gc_sys_bytes"), + "Number of bytes used for garbage collection system metadata.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("other_sys_bytes"), + "Number of bytes used for other system allocations.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("next_gc_bytes"), + "Number of heap bytes when next garbage 
collection will take place.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("last_gc_time_seconds"), + "Number of seconds since 1970 of last garbage collection.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("gc_cpu_fraction"), + "The fraction of this program's available CPU time used by the GC since the program started.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction }, + valType: GaugeValue, + }, + }, + } +} + +func memstatNamespace(s string) string { + return fmt.Sprintf("go_memstats_%s", s) +} + +// Describe returns all descriptions of the collector. +func (c *goCollector) Describe(ch chan<- *Desc) { + ch <- c.goroutinesDesc + ch <- c.threadsDesc + ch <- c.gcDesc + ch <- c.goInfoDesc + for _, i := range c.metrics { + ch <- i.desc + } +} + +// Collect returns the current state of all metrics of the collector. +func (c *goCollector) Collect(ch chan<- Metric) { + ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine())) + n, _ := runtime.ThreadCreateProfile(nil) + ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n)) + + var stats debug.GCStats + stats.PauseQuantiles = make([]time.Duration, 5) + debug.ReadGCStats(&stats) + + quantiles := make(map[float64]float64) + for idx, pq := range stats.PauseQuantiles[1:] { + quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds() + } + quantiles[0.0] = stats.PauseQuantiles[0].Seconds() + ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles) + + ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1) + + ms := &runtime.MemStats{} + runtime.ReadMemStats(ms) + for _, i := range c.metrics { + ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms)) + } +} + +// memStatsMetrics provide description, value, and value type for memstat metrics. +type memStatsMetrics []struct { + desc *Desc + eval func(*runtime.MemStats) float64 + valType ValueType +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go new file mode 100644 index 00000000..331783a7 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -0,0 +1,505 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "math" + "sort" + "sync/atomic" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// A Histogram counts individual observations from an event or sample stream in +// configurable buckets. Similar to a summary, it also provides a sum of +// observations and an observation count. 
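As an editorial aside, a usage sketch of the Histogram introduced here; the metric name and bucket layout are illustrative assumptions, while NewHistogram, HistogramOpts, and the LinearBuckets helper are defined in this file:

```go
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// A request-duration histogram with ten linear buckets from
	// 0.05s up to 0.5s; a +Inf bucket is added implicitly.
	reqDur := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "myapp_request_duration_seconds", // illustrative name
		Help:    "Request duration in seconds.",
		Buckets: prometheus.LinearBuckets(0.05, 0.05, 10),
	})
	prometheus.MustRegister(reqDur)

	start := time.Now()
	// ... handle a request ...
	reqDur.Observe(time.Since(start).Seconds())
}
```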
+//
+// On the Prometheus server, quantiles can be calculated from a Histogram using
+// the histogram_quantile function in the query language.
+//
+// Note that Histograms, in contrast to Summaries, can be aggregated with the
+// Prometheus query language (see the documentation for detailed
+// procedures). However, Histograms require the user to pre-define suitable
+// buckets, and they are in general less accurate. The Observe method of a
+// Histogram has a very low performance overhead in comparison with the Observe
+// method of a Summary.
+//
+// To create Histogram instances, use NewHistogram.
+type Histogram interface {
+	Metric
+	Collector
+
+	// Observe adds a single observation to the histogram.
+	Observe(float64)
+}
+
+// bucketLabel is used for the label that defines the upper bound of a
+// bucket of a histogram ("le" -> "less or equal").
+const bucketLabel = "le"
+
+// DefBuckets are the default Histogram buckets. The default buckets are
+// tailored to broadly measure the response time (in seconds) of a network
+// service. Most likely, however, you will be required to define buckets
+// customized to your use case.
+var (
+	DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
+
+	errBucketLabelNotAllowed = fmt.Errorf(
+		"%q is not allowed as label name in histograms", bucketLabel,
+	)
+)
+
+// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest
+// bucket has an upper bound of 'start'. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is zero or negative.
+func LinearBuckets(start, width float64, count int) []float64 {
+	if count < 1 {
+		panic("LinearBuckets needs a positive count")
+	}
+	buckets := make([]float64, count)
+	for i := range buckets {
+		buckets[i] = start
+		start += width
+	}
+	return buckets
+}
+
+// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an
+// upper bound of 'start' and each following bucket's upper bound is 'factor'
+// times the previous bucket's upper bound. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,
+// or if 'factor' is less than or equal to 1.
+func ExponentialBuckets(start, factor float64, count int) []float64 {
+	if count < 1 {
+		panic("ExponentialBuckets needs a positive count")
+	}
+	if start <= 0 {
+		panic("ExponentialBuckets needs a positive start value")
+	}
+	if factor <= 1 {
+		panic("ExponentialBuckets needs a factor greater than 1")
+	}
+	buckets := make([]float64, count)
+	for i := range buckets {
+		buckets[i] = start
+		start *= factor
+	}
+	return buckets
+}
+
+// HistogramOpts bundles the options for creating a Histogram metric. It is
+// mandatory to set Name and Help to a non-empty string. All other fields are
+// optional and can safely be left at their zero value.
+type HistogramOpts struct {
+	// Namespace, Subsystem, and Name are components of the fully-qualified
+	// name of the Histogram (created by joining these components with
+	// "_"). Only Name is mandatory, the others merely help structuring the
+	// name. Note that the fully-qualified name of the Histogram must be a
+	// valid Prometheus metric name.
+ Namespace string + Subsystem string + Name string + + // Help provides information about this Histogram. Mandatory! + // + // Metrics with the same fully-qualified name must have the same Help + // string. + Help string + + // ConstLabels are used to attach fixed labels to this metric. Metrics + // with the same fully-qualified name must have the same label names in + // their ConstLabels. + // + // ConstLabels are only used rarely. In particular, do not use them to + // attach the same labels to all your metrics. Those use cases are + // better covered by target labels set by the scraping Prometheus + // server, or by one specific metric (e.g. a build_info or a + // machine_role metric). See also + // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels + ConstLabels Labels + + // Buckets defines the buckets into which observations are counted. Each + // element in the slice is the upper inclusive bound of a bucket. The + // values must be sorted in strictly increasing order. There is no need + // to add a highest bucket with +Inf bound, it will be added + // implicitly. The default value is DefBuckets. + Buckets []float64 +} + +// NewHistogram creates a new Histogram based on the provided HistogramOpts. It +// panics if the buckets in HistogramOpts are not in strictly increasing order. +func NewHistogram(opts HistogramOpts) Histogram { + return newHistogram( + NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), + opts, + ) +} + +func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { + if len(desc.variableLabels) != len(labelValues) { + panic(errInconsistentCardinality) + } + + for _, n := range desc.variableLabels { + if n == bucketLabel { + panic(errBucketLabelNotAllowed) + } + } + for _, lp := range desc.constLabelPairs { + if lp.GetName() == bucketLabel { + panic(errBucketLabelNotAllowed) + } + } + + if len(opts.Buckets) == 0 { + opts.Buckets = DefBuckets + } + + h := &histogram{ + desc: desc, + upperBounds: opts.Buckets, + labelPairs: makeLabelPairs(desc, labelValues), + } + for i, upperBound := range h.upperBounds { + if i < len(h.upperBounds)-1 { + if upperBound >= h.upperBounds[i+1] { + panic(fmt.Errorf( + "histogram buckets must be in increasing order: %f >= %f", + upperBound, h.upperBounds[i+1], + )) + } + } else { + if math.IsInf(upperBound, +1) { + // The +Inf bucket is implicit. Remove it here. + h.upperBounds = h.upperBounds[:i] + } + } + } + // Finally we know the final length of h.upperBounds and can make counts. + h.counts = make([]uint64, len(h.upperBounds)) + + h.init(h) // Init self-collection. + return h +} + +type histogram struct { + // sumBits contains the bits of the float64 representing the sum of all + // observations. sumBits and count have to go first in the struct to + // guarantee alignment for atomic operations. + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + sumBits uint64 + count uint64 + + selfCollector + // Note that there is no mutex required. + + desc *Desc + + upperBounds []float64 + counts []uint64 + + labelPairs []*dto.LabelPair +} + +func (h *histogram) Desc() *Desc { + return h.desc +} + +func (h *histogram) Observe(v float64) { + // TODO(beorn7): For small numbers of buckets (<30), a linear search is + // slightly faster than the binary search. If we really care, we could + // switch from one search strategy to the other depending on the number + // of buckets. 
+ // + // Microbenchmarks (BenchmarkHistogramNoLabels): + // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op + // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op + // 300 buckets: 154 ns/op linear - binary 61.6 ns/op + i := sort.SearchFloat64s(h.upperBounds, v) + if i < len(h.counts) { + atomic.AddUint64(&h.counts[i], 1) + } + atomic.AddUint64(&h.count, 1) + for { + oldBits := atomic.LoadUint64(&h.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) { + break + } + } +} + +func (h *histogram) Write(out *dto.Metric) error { + his := &dto.Histogram{} + buckets := make([]*dto.Bucket, len(h.upperBounds)) + + his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits))) + his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count)) + var count uint64 + for i, upperBound := range h.upperBounds { + count += atomic.LoadUint64(&h.counts[i]) + buckets[i] = &dto.Bucket{ + CumulativeCount: proto.Uint64(count), + UpperBound: proto.Float64(upperBound), + } + } + his.Bucket = buckets + out.Histogram = his + out.Label = h.labelPairs + return nil +} + +// HistogramVec is a Collector that bundles a set of Histograms that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. HTTP request latencies, partitioned by status code and method). Create +// instances with NewHistogramVec. +type HistogramVec struct { + *metricVec +} + +// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and +// partitioned by the given label names. +func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &HistogramVec{ + metricVec: newMetricVec(desc, func(lvs ...string) Metric { + return newHistogram(desc, opts, lvs...) + }), + } +} + +// GetMetricWithLabelValues returns the Histogram for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Histogram is created. +// +// It is possible to call this method without using the returned Histogram to only +// create the new Histogram but leave it at its starting value, a Histogram without +// any observations. +// +// Keeping the Histogram for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Histogram from the HistogramVec. In that case, the +// Histogram will still exist, but it will not be exported anymore, even if a +// Histogram with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. 
+func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
+	metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
+	if metric != nil {
+		return metric.(Observer), err
+	}
+	return nil, err
+}
+
+// GetMetricWith returns the Histogram for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Histogram is created. Implications of
+// creating a Histogram without using it and keeping the Histogram for later use
+// are the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) {
+	metric, err := v.metricVec.getMetricWith(labels)
+	if metric != nil {
+		return metric.(Observer), err
+	}
+	return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (v *HistogramVec) WithLabelValues(lvs ...string) Observer {
+	h, err := v.GetMetricWithLabelValues(lvs...)
+	if err != nil {
+		panic(err)
+	}
+	return h
+}
+
+// With works as GetMetricWith but panics where GetMetricWith would have
+// returned an error. Not returning an error allows shortcuts like
+// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (v *HistogramVec) With(labels Labels) Observer {
+	h, err := v.GetMetricWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return h
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the HistogramVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) {
+	vec, err := v.curryWith(labels)
+	if vec != nil {
+		return &HistogramVec{vec}, err
+	}
+	return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *HistogramVec) MustCurryWith(labels Labels) ObserverVec {
+	vec, err := v.CurryWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return vec
+}
+
+type constHistogram struct {
+	desc       *Desc
+	count      uint64
+	sum        float64
+	buckets    map[float64]uint64
+	labelPairs []*dto.LabelPair
+}
+
+func (h *constHistogram) Desc() *Desc {
+	return h.desc
+}
+
+func (h *constHistogram) Write(out *dto.Metric) error {
+	his := &dto.Histogram{}
+	buckets := make([]*dto.Bucket, 0, len(h.buckets))
+
+	his.SampleCount = proto.Uint64(h.count)
+	his.SampleSum = proto.Float64(h.sum)
+
+	for upperBound, count := range h.buckets {
+		buckets = append(buckets, &dto.Bucket{
+			CumulativeCount: proto.Uint64(count),
+			UpperBound:      proto.Float64(upperBound),
+		})
+	}
+
+	if len(buckets) > 0 {
+		sort.Sort(buckSort(buckets))
+	}
+	his.Bucket = buckets
+
+	out.Histogram = his
+	out.Label = h.labelPairs
+
+	return nil
+}
+
+// NewConstHistogram returns a metric representing a Prometheus histogram with
+// fixed values for the count, sum, and bucket counts. As those parameters
+// cannot be changed, the returned value does not implement the Histogram
+// interface (but only the Metric interface). Users of this package will not
+// have much use for it in regular operations. However, when implementing custom
+// Collectors, it is useful as a throw-away metric that is generated on the fly
+// to send it to Prometheus in the Collect method.
+//
+// buckets is a map of upper bounds to cumulative counts, excluding the +Inf
+// bucket.
+//
+// NewConstHistogram returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc.
+func NewConstHistogram(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	buckets map[float64]uint64,
+	labelValues ...string,
+) (Metric, error) {
+	if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+		return nil, err
+	}
+	return &constHistogram{
+		desc:       desc,
+		count:      count,
+		sum:        sum,
+		buckets:    buckets,
+		labelPairs: makeLabelPairs(desc, labelValues),
+	}, nil
+}
+
+// MustNewConstHistogram is a version of NewConstHistogram that panics where
+// NewConstHistogram would have returned an error.
+func MustNewConstHistogram(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	buckets map[float64]uint64,
+	labelValues ...string,
+) Metric {
+	m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...)
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
+
+type buckSort []*dto.Bucket
+
+func (s buckSort) Len() int {
+	return len(s)
+}
+
+func (s buckSort) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func (s buckSort) Less(i, j int) bool {
+	return s[i].GetUpperBound() < s[j].GetUpperBound()
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http.go b/vendor/github.com/prometheus/client_golang/prometheus/http.go
new file mode 100644
index 00000000..dd0f8197
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/http.go
@@ -0,0 +1,523 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "bufio" + "bytes" + "compress/gzip" + "fmt" + "io" + "net" + "net/http" + "strconv" + "strings" + "sync" + "time" + + "github.com/prometheus/common/expfmt" +) + +// TODO(beorn7): Remove this whole file. It is a partial mirror of +// promhttp/http.go (to avoid circular import chains) where everything HTTP +// related should live. The functions here are just for avoiding +// breakage. Everything is deprecated. + +const ( + contentTypeHeader = "Content-Type" + contentLengthHeader = "Content-Length" + contentEncodingHeader = "Content-Encoding" + acceptEncodingHeader = "Accept-Encoding" +) + +var bufPool sync.Pool + +func getBuf() *bytes.Buffer { + buf := bufPool.Get() + if buf == nil { + return &bytes.Buffer{} + } + return buf.(*bytes.Buffer) +} + +func giveBuf(buf *bytes.Buffer) { + buf.Reset() + bufPool.Put(buf) +} + +// Handler returns an HTTP handler for the DefaultGatherer. It is +// already instrumented with InstrumentHandler (using "prometheus" as handler +// name). +// +// Deprecated: Please note the issues described in the doc comment of +// InstrumentHandler. You might want to consider using +// promhttp.InstrumentedHandler instead. +func Handler() http.Handler { + return InstrumentHandler("prometheus", UninstrumentedHandler()) +} + +// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer. +// +// Deprecated: Use promhttp.Handler instead. See there for further documentation. +func UninstrumentedHandler() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + mfs, err := DefaultGatherer.Gather() + if err != nil { + http.Error(w, "An error has occurred during metrics collection:\n\n"+err.Error(), http.StatusInternalServerError) + return + } + + contentType := expfmt.Negotiate(req.Header) + buf := getBuf() + defer giveBuf(buf) + writer, encoding := decorateWriter(req, buf) + enc := expfmt.NewEncoder(writer, contentType) + var lastErr error + for _, mf := range mfs { + if err := enc.Encode(mf); err != nil { + lastErr = err + http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError) + return + } + } + if closer, ok := writer.(io.Closer); ok { + closer.Close() + } + if lastErr != nil && buf.Len() == 0 { + http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError) + return + } + header := w.Header() + header.Set(contentTypeHeader, string(contentType)) + header.Set(contentLengthHeader, fmt.Sprint(buf.Len())) + if encoding != "" { + header.Set(contentEncodingHeader, encoding) + } + w.Write(buf.Bytes()) + }) +} + +// decorateWriter wraps a writer to handle gzip compression if requested. It +// returns the decorated writer and the appropriate "Content-Encoding" header +// (which is empty if no compression is enabled). 
+func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) { + header := request.Header.Get(acceptEncodingHeader) + parts := strings.Split(header, ",") + for _, part := range parts { + part := strings.TrimSpace(part) + if part == "gzip" || strings.HasPrefix(part, "gzip;") { + return gzip.NewWriter(writer), "gzip" + } + } + return writer, "" +} + +var instLabels = []string{"method", "code"} + +type nower interface { + Now() time.Time +} + +type nowFunc func() time.Time + +func (n nowFunc) Now() time.Time { + return n() +} + +var now nower = nowFunc(func() time.Time { + return time.Now() +}) + +func nowSeries(t ...time.Time) nower { + return nowFunc(func() time.Time { + defer func() { + t = t[1:] + }() + + return t[0] + }) +} + +// InstrumentHandler wraps the given HTTP handler for instrumentation. It +// registers four metric collectors (if not already done) and reports HTTP +// metrics to the (newly or already) registered collectors: http_requests_total +// (CounterVec), http_request_duration_microseconds (Summary), +// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each +// has a constant label named "handler" with the provided handlerName as +// value. http_requests_total is a metric vector partitioned by HTTP method +// (label name "method") and HTTP status code (label name "code"). +// +// Deprecated: InstrumentHandler has several issues. Use the tooling provided in +// package promhttp instead. The issues are the following: +// +// - It uses Summaries rather than Histograms. Summaries are not useful if +// aggregation across multiple instances is required. +// +// - It uses microseconds as unit, which is deprecated and should be replaced by +// seconds. +// +// - The size of the request is calculated in a separate goroutine. Since this +// calculator requires access to the request header, it creates a race with +// any writes to the header performed during request handling. +// httputil.ReverseProxy is a prominent example for a handler +// performing such writes. +// +// - It has additional issues with HTTP/2, cf. +// https://github.com/prometheus/client_golang/issues/272. +func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc { + return InstrumentHandlerFunc(handlerName, handler.ServeHTTP) +} + +// InstrumentHandlerFunc wraps the given function for instrumentation. It +// otherwise works in the same way as InstrumentHandler (and shares the same +// issues). +// +// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as +// InstrumentHandler is. Use the tooling provided in package promhttp instead. +func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { + return InstrumentHandlerFuncWithOpts( + SummaryOpts{ + Subsystem: "http", + ConstLabels: Labels{"handler": handlerName}, + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + handlerFunc, + ) +} + +// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same +// issues) but provides more flexibility (at the cost of a more complex call +// syntax). As InstrumentHandler, this function registers four metric +// collectors, but it uses the provided SummaryOpts to create them. However, the +// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced +// by "requests_total", "request_duration_microseconds", "request_size_bytes", +// and "response_size_bytes", respectively. "Help" is replaced by an appropriate +// help string. 
The names of the variable labels of the http_requests_total +// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code). +// +// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the +// behavior of InstrumentHandler: +// +// prometheus.InstrumentHandlerWithOpts( +// prometheus.SummaryOpts{ +// Subsystem: "http", +// ConstLabels: prometheus.Labels{"handler": handlerName}, +// }, +// handler, +// ) +// +// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it +// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally, +// and all its fields are set to the equally named fields in the provided +// SummaryOpts. +// +// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as +// InstrumentHandler is. Use the tooling provided in package promhttp instead. +func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc { + return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP) +} + +// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares +// the same issues) but provides more flexibility (at the cost of a more complex +// call syntax). See InstrumentHandlerWithOpts for details how the provided +// SummaryOpts are used. +// +// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons +// as InstrumentHandler is. Use the tooling provided in package promhttp instead. +func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { + reqCnt := NewCounterVec( + CounterOpts{ + Namespace: opts.Namespace, + Subsystem: opts.Subsystem, + Name: "requests_total", + Help: "Total number of HTTP requests made.", + ConstLabels: opts.ConstLabels, + }, + instLabels, + ) + if err := Register(reqCnt); err != nil { + if are, ok := err.(AlreadyRegisteredError); ok { + reqCnt = are.ExistingCollector.(*CounterVec) + } else { + panic(err) + } + } + + opts.Name = "request_duration_microseconds" + opts.Help = "The HTTP request latencies in microseconds." + reqDur := NewSummary(opts) + if err := Register(reqDur); err != nil { + if are, ok := err.(AlreadyRegisteredError); ok { + reqDur = are.ExistingCollector.(Summary) + } else { + panic(err) + } + } + + opts.Name = "request_size_bytes" + opts.Help = "The HTTP request sizes in bytes." + reqSz := NewSummary(opts) + if err := Register(reqSz); err != nil { + if are, ok := err.(AlreadyRegisteredError); ok { + reqSz = are.ExistingCollector.(Summary) + } else { + panic(err) + } + } + + opts.Name = "response_size_bytes" + opts.Help = "The HTTP response sizes in bytes." 
+ resSz := NewSummary(opts) + if err := Register(resSz); err != nil { + if are, ok := err.(AlreadyRegisteredError); ok { + resSz = are.ExistingCollector.(Summary) + } else { + panic(err) + } + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + + delegate := &responseWriterDelegator{ResponseWriter: w} + out := computeApproximateRequestSize(r) + + _, cn := w.(http.CloseNotifier) + _, fl := w.(http.Flusher) + _, hj := w.(http.Hijacker) + _, rf := w.(io.ReaderFrom) + var rw http.ResponseWriter + if cn && fl && hj && rf { + rw = &fancyResponseWriterDelegator{delegate} + } else { + rw = delegate + } + handlerFunc(rw, r) + + elapsed := float64(time.Since(now)) / float64(time.Microsecond) + + method := sanitizeMethod(r.Method) + code := sanitizeCode(delegate.status) + reqCnt.WithLabelValues(method, code).Inc() + reqDur.Observe(elapsed) + resSz.Observe(float64(delegate.written)) + reqSz.Observe(float64(<-out)) + }) +} + +func computeApproximateRequestSize(r *http.Request) <-chan int { + // Get URL length in current go routine for avoiding a race condition. + // HandlerFunc that runs in parallel may modify the URL. + s := 0 + if r.URL != nil { + s += len(r.URL.String()) + } + + out := make(chan int, 1) + + go func() { + s += len(r.Method) + s += len(r.Proto) + for name, values := range r.Header { + s += len(name) + for _, value := range values { + s += len(value) + } + } + s += len(r.Host) + + // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. + + if r.ContentLength != -1 { + s += int(r.ContentLength) + } + out <- s + close(out) + }() + + return out +} + +type responseWriterDelegator struct { + http.ResponseWriter + + handler, method string + status int + written int64 + wroteHeader bool +} + +func (r *responseWriterDelegator) WriteHeader(code int) { + r.status = code + r.wroteHeader = true + r.ResponseWriter.WriteHeader(code) +} + +func (r *responseWriterDelegator) Write(b []byte) (int, error) { + if !r.wroteHeader { + r.WriteHeader(http.StatusOK) + } + n, err := r.ResponseWriter.Write(b) + r.written += int64(n) + return n, err +} + +type fancyResponseWriterDelegator struct { + *responseWriterDelegator +} + +func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool { + return f.ResponseWriter.(http.CloseNotifier).CloseNotify() +} + +func (f *fancyResponseWriterDelegator) Flush() { + f.ResponseWriter.(http.Flusher).Flush() +} + +func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { + return f.ResponseWriter.(http.Hijacker).Hijack() +} + +func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) { + if !f.wroteHeader { + f.WriteHeader(http.StatusOK) + } + n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r) + f.written += n + return n, err +} + +func sanitizeMethod(m string) string { + switch m { + case "GET", "get": + return "get" + case "PUT", "put": + return "put" + case "HEAD", "head": + return "head" + case "POST", "post": + return "post" + case "DELETE", "delete": + return "delete" + case "CONNECT", "connect": + return "connect" + case "OPTIONS", "options": + return "options" + case "NOTIFY", "notify": + return "notify" + default: + return strings.ToLower(m) + } +} + +func sanitizeCode(s int) string { + switch s { + case 100: + return "100" + case 101: + return "101" + + case 200: + return "200" + case 201: + return "201" + case 202: + return "202" + case 203: + return "203" + case 204: + return "204" + case 205: + return "205" + case 206: + return "206" + + case 300: 
+ return "300" + case 301: + return "301" + case 302: + return "302" + case 304: + return "304" + case 305: + return "305" + case 307: + return "307" + + case 400: + return "400" + case 401: + return "401" + case 402: + return "402" + case 403: + return "403" + case 404: + return "404" + case 405: + return "405" + case 406: + return "406" + case 407: + return "407" + case 408: + return "408" + case 409: + return "409" + case 410: + return "410" + case 411: + return "411" + case 412: + return "412" + case 413: + return "413" + case 414: + return "414" + case 415: + return "415" + case 416: + return "416" + case 417: + return "417" + case 418: + return "418" + + case 500: + return "500" + case 501: + return "501" + case 502: + return "502" + case 503: + return "503" + case 504: + return "504" + case 505: + return "505" + + case 428: + return "428" + case 429: + return "429" + case 431: + return "431" + case 511: + return "511" + + default: + return strconv.Itoa(s) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go new file mode 100644 index 00000000..2502e373 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go @@ -0,0 +1,57 @@ +package prometheus + +import ( + "errors" + "fmt" + "strings" + "unicode/utf8" + + "github.com/prometheus/common/model" +) + +// Labels represents a collection of label name -> value mappings. This type is +// commonly used with the With(Labels) and GetMetricWith(Labels) methods of +// metric vector Collectors, e.g.: +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +// +// The other use-case is the specification of constant label pairs in Opts or to +// create a Desc. +type Labels map[string]string + +// reservedLabelPrefix is a prefix which is not legal in user-supplied +// label names. +const reservedLabelPrefix = "__" + +var errInconsistentCardinality = errors.New("inconsistent label cardinality") + +func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { + if len(labels) != expectedNumberOfValues { + return errInconsistentCardinality + } + + for name, val := range labels { + if !utf8.ValidString(val) { + return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val) + } + } + + return nil +} + +func validateLabelValues(vals []string, expectedNumberOfValues int) error { + if len(vals) != expectedNumberOfValues { + return errInconsistentCardinality + } + + for _, val := range vals { + if !utf8.ValidString(val) { + return fmt.Errorf("label value %q is not valid UTF-8", val) + } + } + + return nil +} + +func checkLabelName(l string) bool { + return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go new file mode 100644 index 00000000..6213ee81 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -0,0 +1,158 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"strings"
+
+	dto "github.com/prometheus/client_model/go"
+)
+
+const separatorByte byte = 255
+
+// A Metric models a single sample value with its meta data being exported to
+// Prometheus. Implementations of Metric in this package are Gauge, Counter,
+// Histogram, Summary, and Untyped.
+type Metric interface {
+	// Desc returns the descriptor for the Metric. This method idempotently
+	// returns the same descriptor throughout the lifetime of the
+	// Metric. The returned descriptor is immutable by contract. A Metric
+	// unable to describe itself must return an invalid descriptor (created
+	// with NewInvalidDesc).
+	Desc() *Desc
+	// Write encodes the Metric into a "Metric" Protocol Buffer data
+	// transmission object.
+	//
+	// Metric implementations must observe concurrency safety as reads of
+	// this metric may occur at any time, and any blocking occurs at the
+	// expense of total performance of rendering all registered
+	// metrics. Ideally, Metric implementations should support concurrent
+	// readers.
+	//
+	// While populating dto.Metric, it is the responsibility of the
+	// implementation to ensure validity of the Metric protobuf (like valid
+	// UTF-8 strings or syntactically valid metric and label names). It is
+	// recommended to sort labels lexicographically. (Implementers may find
+	// LabelPairSorter useful for that.) Callers of Write should still make
+	// sure of sorting if they depend on it.
+	Write(*dto.Metric) error
+	// TODO(beorn7): The original rationale of passing in a pre-allocated
+	// dto.Metric protobuf to save allocations has disappeared. The
+	// signature of this method should be changed to "Write() (*dto.Metric,
+	// error)".
+}
+
+// Opts bundles the options for creating most Metric types. Each metric
+// implementation XXX has its own XXXOpts type, but in most cases, it is just
+// an alias of this type (which might change when the requirement arises).
+//
+// It is mandatory to set Name and Help to a non-empty string. All other fields
+// are optional and can safely be left at their zero value.
+type Opts struct {
+	// Namespace, Subsystem, and Name are components of the fully-qualified
+	// name of the Metric (created by joining these components with
+	// "_"). Only Name is mandatory, the others merely help structuring the
+	// name. Note that the fully-qualified name of the metric must be a
+	// valid Prometheus metric name.
+	Namespace string
+	Subsystem string
+	Name      string
+
+	// Help provides information about this metric. Mandatory!
+	//
+	// Metrics with the same fully-qualified name must have the same Help
+	// string.
+	Help string
+
+	// ConstLabels are used to attach fixed labels to this metric. Metrics
+	// with the same fully-qualified name must have the same label names in
+	// their ConstLabels.
+	//
+	// ConstLabels are only used rarely. In particular, do not use them to
+	// attach the same labels to all your metrics. Those use cases are
+	// better covered by target labels set by the scraping Prometheus
+	// server, or by one specific metric (e.g.
a build_info or a + // machine_role metric). See also + // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels + ConstLabels Labels +} + +// BuildFQName joins the given three name components by "_". Empty name +// components are ignored. If the name parameter itself is empty, an empty +// string is returned, no matter what. Metric implementations included in this +// library use this function internally to generate the fully-qualified metric +// name from the name component in their Opts. Users of the library will only +// need this function if they implement their own Metric or instantiate a Desc +// (with NewDesc) directly. +func BuildFQName(namespace, subsystem, name string) string { + if name == "" { + return "" + } + switch { + case namespace != "" && subsystem != "": + return strings.Join([]string{namespace, subsystem, name}, "_") + case namespace != "": + return strings.Join([]string{namespace, name}, "_") + case subsystem != "": + return strings.Join([]string{subsystem, name}, "_") + } + return name +} + +// LabelPairSorter implements sort.Interface. It is used to sort a slice of +// dto.LabelPair pointers. This is useful for implementing the Write method of +// custom metrics. +type LabelPairSorter []*dto.LabelPair + +func (s LabelPairSorter) Len() int { + return len(s) +} + +func (s LabelPairSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s LabelPairSorter) Less(i, j int) bool { + return s[i].GetName() < s[j].GetName() +} + +type hashSorter []uint64 + +func (s hashSorter) Len() int { + return len(s) +} + +func (s hashSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s hashSorter) Less(i, j int) bool { + return s[i] < s[j] +} + +type invalidMetric struct { + desc *Desc + err error +} + +// NewInvalidMetric returns a metric whose Write method always returns the +// provided error. It is useful if a Collector finds itself unable to collect +// a metric and wishes to report an error to the registry. +func NewInvalidMetric(desc *Desc, err error) Metric { + return &invalidMetric{desc, err} +} + +func (m *invalidMetric) Desc() *Desc { return m.desc } + +func (m *invalidMetric) Write(*dto.Metric) error { return m.err } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/vendor/github.com/prometheus/client_golang/prometheus/observer.go new file mode 100644 index 00000000..5806cd09 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/observer.go @@ -0,0 +1,52 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Observer is the interface that wraps the Observe method, which is used by +// Histogram and Summary to add observations. +type Observer interface { + Observe(float64) +} + +// The ObserverFunc type is an adapter to allow the use of ordinary +// functions as Observers. If f is a function with the appropriate +// signature, ObserverFunc(f) is an Observer that calls f. 
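+//
+// A minimal sketch (illustrative only, not part of the upstream docs):
+//
+//	var last float64
+//	obs := ObserverFunc(func(v float64) { last = v })
+//	obs.Observe(3.14) // last == 3.14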
+// +// This adapter is usually used in connection with the Timer type, and there are +// two general use cases: +// +// The most common one is to use a Gauge as the Observer for a Timer. +// See the "Gauge" Timer example. +// +// The more advanced use case is to create a function that dynamically decides +// which Observer to use for observing the duration. See the "Complex" Timer +// example. +type ObserverFunc func(float64) + +// Observe calls f(value). It implements Observer. +func (f ObserverFunc) Observe(value float64) { + f(value) +} + +// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`. +type ObserverVec interface { + GetMetricWith(Labels) (Observer, error) + GetMetricWithLabelValues(lvs ...string) (Observer, error) + With(Labels) Observer + WithLabelValues(...string) Observer + CurryWith(Labels) (ObserverVec, error) + MustCurryWith(Labels) ObserverVec + + Collector +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go new file mode 100644 index 00000000..32ac74a7 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -0,0 +1,140 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import "github.com/prometheus/procfs" + +type processCollector struct { + pid int + collectFn func(chan<- Metric) + pidFn func() (int, error) + cpuTotal *Desc + openFDs, maxFDs *Desc + vsize, rss *Desc + startTime *Desc +} + +// NewProcessCollector returns a collector which exports the current state of +// process metrics including CPU, memory and file descriptor usage as well as +// the process start time for the given process ID under the given namespace. +// +// Currently, the collector depends on a Linux-style proc filesystem and +// therefore only exports metrics for Linux. +func NewProcessCollector(pid int, namespace string) Collector { + return NewProcessCollectorPIDFn( + func() (int, error) { return pid, nil }, + namespace, + ) +} + +// NewProcessCollectorPIDFn works like NewProcessCollector but the process ID is +// determined on each collect anew by calling the given pidFn function. 
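+//
+// A hypothetical sketch (readPIDFile is an assumed helper that returns the
+// PID of the monitored process):
+//
+//	MustRegister(NewProcessCollectorPIDFn(readPIDFile, "myapp"))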
+func NewProcessCollectorPIDFn( + pidFn func() (int, error), + namespace string, +) Collector { + ns := "" + if len(namespace) > 0 { + ns = namespace + "_" + } + + c := processCollector{ + pidFn: pidFn, + collectFn: func(chan<- Metric) {}, + + cpuTotal: NewDesc( + ns+"process_cpu_seconds_total", + "Total user and system CPU time spent in seconds.", + nil, nil, + ), + openFDs: NewDesc( + ns+"process_open_fds", + "Number of open file descriptors.", + nil, nil, + ), + maxFDs: NewDesc( + ns+"process_max_fds", + "Maximum number of open file descriptors.", + nil, nil, + ), + vsize: NewDesc( + ns+"process_virtual_memory_bytes", + "Virtual memory size in bytes.", + nil, nil, + ), + rss: NewDesc( + ns+"process_resident_memory_bytes", + "Resident memory size in bytes.", + nil, nil, + ), + startTime: NewDesc( + ns+"process_start_time_seconds", + "Start time of the process since unix epoch in seconds.", + nil, nil, + ), + } + + // Set up process metric collection if supported by the runtime. + if _, err := procfs.NewStat(); err == nil { + c.collectFn = c.processCollect + } + + return &c +} + +// Describe returns all descriptions of the collector. +func (c *processCollector) Describe(ch chan<- *Desc) { + ch <- c.cpuTotal + ch <- c.openFDs + ch <- c.maxFDs + ch <- c.vsize + ch <- c.rss + ch <- c.startTime +} + +// Collect returns the current state of all metrics of the collector. +func (c *processCollector) Collect(ch chan<- Metric) { + c.collectFn(ch) +} + +// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the +// client allows users to configure the error behavior. +func (c *processCollector) processCollect(ch chan<- Metric) { + pid, err := c.pidFn() + if err != nil { + return + } + + p, err := procfs.NewProc(pid) + if err != nil { + return + } + + if stat, err := p.NewStat(); err == nil { + ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime()) + ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory())) + ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory())) + if startTime, err := stat.StartTime(); err == nil { + ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) + } + } + + if fds, err := p.FileDescriptorsLen(); err == nil { + ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds)) + } + + if limits, err := p.NewLimits(); err == nil { + ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles)) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go new file mode 100644 index 00000000..9c1c66dc --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go @@ -0,0 +1,199 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package promhttp + +import ( + "bufio" + "io" + "net" + "net/http" +) + +const ( + closeNotifier = 1 << iota + flusher + hijacker + readerFrom + pusher +) + +type delegator interface { + http.ResponseWriter + + Status() int + Written() int64 +} + +type responseWriterDelegator struct { + http.ResponseWriter + + handler, method string + status int + written int64 + wroteHeader bool + observeWriteHeader func(int) +} + +func (r *responseWriterDelegator) Status() int { + return r.status +} + +func (r *responseWriterDelegator) Written() int64 { + return r.written +} + +func (r *responseWriterDelegator) WriteHeader(code int) { + r.status = code + r.wroteHeader = true + r.ResponseWriter.WriteHeader(code) + if r.observeWriteHeader != nil { + r.observeWriteHeader(code) + } +} + +func (r *responseWriterDelegator) Write(b []byte) (int, error) { + if !r.wroteHeader { + r.WriteHeader(http.StatusOK) + } + n, err := r.ResponseWriter.Write(b) + r.written += int64(n) + return n, err +} + +type closeNotifierDelegator struct{ *responseWriterDelegator } +type flusherDelegator struct{ *responseWriterDelegator } +type hijackerDelegator struct{ *responseWriterDelegator } +type readerFromDelegator struct{ *responseWriterDelegator } + +func (d *closeNotifierDelegator) CloseNotify() <-chan bool { + return d.ResponseWriter.(http.CloseNotifier).CloseNotify() +} +func (d *flusherDelegator) Flush() { + d.ResponseWriter.(http.Flusher).Flush() +} +func (d *hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { + return d.ResponseWriter.(http.Hijacker).Hijack() +} +func (d *readerFromDelegator) ReadFrom(re io.Reader) (int64, error) { + if !d.wroteHeader { + d.WriteHeader(http.StatusOK) + } + n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re) + d.written += n + return n, err +} + +var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32) + +func init() { + // TODO(beorn7): Code generation would help here. 
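+	// Each slice index is the bitwise OR of the interface flags defined
+	// above (closeNotifier, flusher, hijacker, readerFrom, pusher), so
+	// newDelegator can look up, in O(1), a wrapper exposing exactly the
+	// optional interfaces the underlying http.ResponseWriter implements.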
+ pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0 + return d + } + pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1 + return &closeNotifierDelegator{d} + } + pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2 + return &flusherDelegator{d} + } + pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3 + return struct { + *responseWriterDelegator + http.Flusher + http.CloseNotifier + }{d, &flusherDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4 + return &hijackerDelegator{d} + } + pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5 + return struct { + *responseWriterDelegator + http.Hijacker + http.CloseNotifier + }{d, &hijackerDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6 + return struct { + *responseWriterDelegator + http.Hijacker + http.Flusher + }{d, &hijackerDelegator{d}, &flusherDelegator{d}} + } + pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7 + return struct { + *responseWriterDelegator + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8 + return readerFromDelegator{d} + } + pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.CloseNotifier + }{d, &readerFromDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Flusher + }{d, &readerFromDelegator{d}, &flusherDelegator{d}} + } + pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Flusher + http.CloseNotifier + }{d, &readerFromDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + }{d, &readerFromDelegator{d}, &hijackerDelegator{d}} + } + pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + http.CloseNotifier + }{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + http.Flusher + }{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}} + } + pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}} + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go 
b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go new file mode 100644 index 00000000..75a905e2 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go @@ -0,0 +1,181 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.8 + +package promhttp + +import ( + "io" + "net/http" +) + +type pusherDelegator struct{ *responseWriterDelegator } + +func (d *pusherDelegator) Push(target string, opts *http.PushOptions) error { + return d.ResponseWriter.(http.Pusher).Push(target, opts) +} + +func init() { + pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16 + return &pusherDelegator{d} + } + pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17 + return struct { + *responseWriterDelegator + http.Pusher + http.CloseNotifier + }{d, &pusherDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18 + return struct { + *responseWriterDelegator + http.Pusher + http.Flusher + }{d, &pusherDelegator{d}, &flusherDelegator{d}} + } + pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19 + return struct { + *responseWriterDelegator + http.Pusher + http.Flusher + http.CloseNotifier + }{d, &pusherDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + }{d, &pusherDelegator{d}, &hijackerDelegator{d}} + } + pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.CloseNotifier + }{d, &pusherDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.Flusher + }{d, &pusherDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}} + } + pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, &pusherDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + }{d, &pusherDelegator{d}, &readerFromDelegator{d}} + } + pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.CloseNotifier + }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &closeNotifierDelegator{d}} + } 
+ pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Flusher + }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &flusherDelegator{d}} + } + pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Flusher + http.CloseNotifier + }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.CloseNotifier + }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.Flusher + }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}} + } +} + +func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { + d := &responseWriterDelegator{ + ResponseWriter: w, + observeWriteHeader: observeWriteHeaderFunc, + } + + id := 0 + if _, ok := w.(http.CloseNotifier); ok { + id += closeNotifier + } + if _, ok := w.(http.Flusher); ok { + id += flusher + } + if _, ok := w.(http.Hijacker); ok { + id += hijacker + } + if _, ok := w.(io.ReaderFrom); ok { + id += readerFrom + } + if _, ok := w.(http.Pusher); ok { + id += pusher + } + + return pickDelegator[id](d) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go new file mode 100644 index 00000000..8bb9b8b6 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go @@ -0,0 +1,44 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build !go1.8 + +package promhttp + +import ( + "io" + "net/http" +) + +func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { + d := &responseWriterDelegator{ + ResponseWriter: w, + observeWriteHeader: observeWriteHeaderFunc, + } + + id := 0 + if _, ok := w.(http.CloseNotifier); ok { + id += closeNotifier + } + if _, ok := w.(http.Flusher); ok { + id += flusher + } + if _, ok := w.(http.Hijacker); ok { + id += hijacker + } + if _, ok := w.(io.ReaderFrom); ok { + id += readerFrom + } + + return pickDelegator[id](d) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go new file mode 100644 index 00000000..cdca3e39 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -0,0 +1,267 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package promhttp provides tooling around HTTP servers and clients. +// +// First, the package allows the creation of http.Handler instances to expose +// Prometheus metrics via HTTP. promhttp.Handler acts on the +// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a +// custom registry or anything that implements the Gatherer interface. It also +// allows the creation of handlers that act differently on errors or allow to +// log errors. +// +// Second, the package provides tooling to instrument instances of http.Handler +// via middleware. Middleware wrappers follow the naming scheme +// InstrumentHandlerX, where X describes the intended use of the middleware. +// See each function's doc comment for specific details. +// +// Finally, the package allows for an http.RoundTripper to be instrumented via +// middleware. Middleware wrappers follow the naming scheme +// InstrumentRoundTripperX, where X describes the intended use of the +// middleware. See each function's doc comment for specific details. +package promhttp + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "net/http" + "strings" + "sync" + + "github.com/prometheus/common/expfmt" + + "github.com/prometheus/client_golang/prometheus" +) + +const ( + contentTypeHeader = "Content-Type" + contentLengthHeader = "Content-Length" + contentEncodingHeader = "Content-Encoding" + acceptEncodingHeader = "Accept-Encoding" +) + +var bufPool sync.Pool + +func getBuf() *bytes.Buffer { + buf := bufPool.Get() + if buf == nil { + return &bytes.Buffer{} + } + return buf.(*bytes.Buffer) +} + +func giveBuf(buf *bytes.Buffer) { + buf.Reset() + bufPool.Put(buf) +} + +// Handler returns an http.Handler for the prometheus.DefaultGatherer, using +// default HandlerOpts, i.e. it reports the first error as an HTTP error, it has +// no error logging, and it applies compression if requested by the client. +// +// The returned http.Handler is already instrumented using the +// InstrumentMetricHandler function and the prometheus.DefaultRegisterer. 
If you +// create multiple http.Handlers by separate calls of the Handler function, the +// metrics used for instrumentation will be shared between them, providing +// global scrape counts. +// +// This function is meant to cover the bulk of basic use cases. If you are doing +// anything that requires more customization (including using a non-default +// Gatherer, different instrumentation, and non-default HandlerOpts), use the +// HandlerFor function. See there for details. +func Handler() http.Handler { + return InstrumentMetricHandler( + prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}), + ) +} + +// HandlerFor returns an uninstrumented http.Handler for the provided +// Gatherer. The behavior of the Handler is defined by the provided +// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom +// Gatherers, with non-default HandlerOpts, and/or with custom (or no) +// instrumentation. Use the InstrumentMetricHandler function to apply the same +// kind of instrumentation as it is used by the Handler function. +func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + mfs, err := reg.Gather() + if err != nil { + if opts.ErrorLog != nil { + opts.ErrorLog.Println("error gathering metrics:", err) + } + switch opts.ErrorHandling { + case PanicOnError: + panic(err) + case ContinueOnError: + if len(mfs) == 0 { + http.Error(w, "No metrics gathered, last error:\n\n"+err.Error(), http.StatusInternalServerError) + return + } + case HTTPErrorOnError: + http.Error(w, "An error has occurred during metrics gathering:\n\n"+err.Error(), http.StatusInternalServerError) + return + } + } + + contentType := expfmt.Negotiate(req.Header) + buf := getBuf() + defer giveBuf(buf) + writer, encoding := decorateWriter(req, buf, opts.DisableCompression) + enc := expfmt.NewEncoder(writer, contentType) + var lastErr error + for _, mf := range mfs { + if err := enc.Encode(mf); err != nil { + lastErr = err + if opts.ErrorLog != nil { + opts.ErrorLog.Println("error encoding metric family:", err) + } + switch opts.ErrorHandling { + case PanicOnError: + panic(err) + case ContinueOnError: + // Handled later. + case HTTPErrorOnError: + http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError) + return + } + } + } + if closer, ok := writer.(io.Closer); ok { + closer.Close() + } + if lastErr != nil && buf.Len() == 0 { + http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError) + return + } + header := w.Header() + header.Set(contentTypeHeader, string(contentType)) + header.Set(contentLengthHeader, fmt.Sprint(buf.Len())) + if encoding != "" { + header.Set(contentEncodingHeader, encoding) + } + w.Write(buf.Bytes()) + // TODO(beorn7): Consider streaming serving of metrics. + }) +} + +// InstrumentMetricHandler is usually used with an http.Handler returned by the +// HandlerFor function. It instruments the provided http.Handler with two +// metrics: A counter vector "promhttp_metric_handler_requests_total" to count +// scrapes partitioned by HTTP status code, and a gauge +// "promhttp_metric_handler_requests_in_flight" to track the number of +// simultaneous scrapes. This function idempotently registers collectors for +// both metrics with the provided Registerer. It panics if the registration +// fails. 
The provided metrics are useful to see how many scrapes hit the +// monitored target (which could be from different Prometheus servers or other +// scrapers), and how often they overlap (which would result in more than one +// scrape in flight at the same time). Note that the scrapes-in-flight gauge +// will contain the scrape by which it is exposed, while the scrape counter will +// only get incremented after the scrape is complete (as only then the status +// code is known). For tracking scrape durations, use the +// "scrape_duration_seconds" gauge created by the Prometheus server upon each +// scrape. +func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler { + cnt := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "promhttp_metric_handler_requests_total", + Help: "Total number of scrapes by HTTP status code.", + }, + []string{"code"}, + ) + // Initialize the most likely HTTP status codes. + cnt.WithLabelValues("200") + cnt.WithLabelValues("500") + if err := reg.Register(cnt); err != nil { + if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + cnt = are.ExistingCollector.(*prometheus.CounterVec) + } else { + panic(err) + } + } + + gge := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "promhttp_metric_handler_requests_in_flight", + Help: "Current number of scrapes being served.", + }) + if err := reg.Register(gge); err != nil { + if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + gge = are.ExistingCollector.(prometheus.Gauge) + } else { + panic(err) + } + } + + return InstrumentHandlerCounter(cnt, InstrumentHandlerInFlight(gge, handler)) +} + +// HandlerErrorHandling defines how a Handler serving metrics will handle +// errors. +type HandlerErrorHandling int + +// These constants cause handlers serving metrics to behave as described if +// errors are encountered. +const ( + // Serve an HTTP status code 500 upon the first error + // encountered. Report the error message in the body. + HTTPErrorOnError HandlerErrorHandling = iota + // Ignore errors and try to serve as many metrics as possible. However, + // if no metrics can be served, serve an HTTP status code 500 and the + // last error message in the body. Only use this in deliberate "best + // effort" metrics collection scenarios. It is recommended to at least + // log errors (by providing an ErrorLog in HandlerOpts) to not mask + // errors completely. + ContinueOnError + // Panic upon the first error encountered (useful for "crash only" apps). + PanicOnError +) + +// Logger is the minimal interface HandlerOpts needs for logging. Note that +// log.Logger from the standard library implements this interface, and it is +// easy to implement by custom loggers, if they don't do so already anyway. +type Logger interface { + Println(v ...interface{}) +} + +// HandlerOpts specifies options how to serve metrics via an http.Handler. The +// zero value of HandlerOpts is a reasonable default. +type HandlerOpts struct { + // ErrorLog specifies an optional logger for errors collecting and + // serving metrics. If nil, errors are not logged at all. + ErrorLog Logger + // ErrorHandling defines how errors are handled. Note that errors are + // logged regardless of the configured ErrorHandling provided ErrorLog + // is not nil. + ErrorHandling HandlerErrorHandling + // If DisableCompression is true, the handler will never compress the + // response, even if requested by the client. 
+	DisableCompression bool
+}
+
+// decorateWriter wraps a writer to handle gzip compression if requested. It
+// returns the decorated writer and the appropriate "Content-Encoding" header
+// (which is empty if no compression is enabled).
+func decorateWriter(request *http.Request, writer io.Writer, compressionDisabled bool) (io.Writer, string) {
+	if compressionDisabled {
+		return writer, ""
+	}
+	header := request.Header.Get(acceptEncodingHeader)
+	parts := strings.Split(header, ",")
+	for _, part := range parts {
+		part := strings.TrimSpace(part)
+		if part == "gzip" || strings.HasPrefix(part, "gzip;") {
+			return gzip.NewWriter(writer), "gzip"
+		}
+	}
+	return writer, ""
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
new file mode 100644
index 00000000..86fd5644
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
@@ -0,0 +1,97 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promhttp
+
+import (
+	"net/http"
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+// The RoundTripperFunc type is an adapter to allow the use of ordinary
+// functions as RoundTrippers. If f is a function with the appropriate
+// signature, RoundTripperFunc(f) is a RoundTripper that calls f.
+type RoundTripperFunc func(req *http.Request) (*http.Response, error)
+
+// RoundTrip implements the RoundTripper interface.
+func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) {
+	return rt(r)
+}
+
+// InstrumentRoundTripperInFlight is a middleware that wraps the provided
+// http.RoundTripper. It sets the provided prometheus.Gauge to the number of
+// requests currently handled by the wrapped http.RoundTripper.
+//
+// See the example for ExampleInstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc {
+	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+		gauge.Inc()
+		defer gauge.Dec()
+		return next.RoundTrip(r)
+	})
+}
+
+// InstrumentRoundTripperCounter is a middleware that wraps the provided
+// http.RoundTripper to observe the request result with the provided CounterVec.
+// The CounterVec must have zero, one, or two non-const non-curried labels. For
+// those, the only allowed label names are "code" and "method". The function
+// panics otherwise. Partitioning of the CounterVec happens by HTTP status code
+// and/or HTTP method if the respective instance label names are present in the
+// CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
+//
+// If the wrapped RoundTripper panics or returns a non-nil error, the Counter
+// is not incremented.
+//
+// See the example for ExampleInstrumentRoundTripperDuration for example usage.
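+//
+// A hypothetical wiring sketch (apiRequests is an assumed, already registered
+// *prometheus.CounterVec partitioned by "code" and "method"):
+//
+//	client := http.Client{
+//		Transport: InstrumentRoundTripperCounter(apiRequests, http.DefaultTransport),
+//	}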
+func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc {
+	code, method := checkLabels(counter)
+
+	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+		resp, err := next.RoundTrip(r)
+		if err == nil {
+			counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc()
+		}
+		return resp, err
+	})
+}
+
+// InstrumentRoundTripperDuration is a middleware that wraps the provided
+// http.RoundTripper to observe the request duration with the provided
+// ObserverVec. The ObserverVec must have zero, one, or two non-const
+// non-curried labels. For those, the only allowed label names are "code" and
+// "method". The function panics otherwise. The Observe method of the Observer
+// in the ObserverVec is called with the request duration in
+// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
+// respective instance label names are present in the ObserverVec. For
+// unpartitioned observations, use an ObserverVec with zero labels. Note that
+// partitioning of Histograms is expensive and should be used judiciously.
+//
+// If the wrapped RoundTripper panics or returns a non-nil error, no values are
+// reported.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
+func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc {
+	code, method := checkLabels(obs)
+
+	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+		start := time.Now()
+		resp, err := next.RoundTrip(r)
+		if err == nil {
+			obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds())
+		}
+		return resp, err
+	})
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go
new file mode 100644
index 00000000..0bd80c35
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go
@@ -0,0 +1,144 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.8
+
+package promhttp
+
+import (
+	"context"
+	"crypto/tls"
+	"net/http"
+	"net/http/httptrace"
+	"time"
+)
+
+// InstrumentTrace is used to offer flexibility in instrumenting the available
+// httptrace.ClientTrace hook functions. Each function is passed a float64
+// representing the time in seconds since the start of the http request. A user
+// may choose to use separately bucketed Histograms, or implement custom
+// instance labels on a per function basis.
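+//
+// For instance (an illustrative sketch; dnsLatency and tlsLatency are assumed
+// Observers, e.g. Histograms):
+//
+//	trace := &InstrumentTrace{
+//		DNSDone:          func(t float64) { dnsLatency.Observe(t) },
+//		TLSHandshakeDone: func(t float64) { tlsLatency.Observe(t) },
+//	}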
+type InstrumentTrace struct {
+	GotConn              func(float64)
+	PutIdleConn          func(float64)
+	GotFirstResponseByte func(float64)
+	Got100Continue       func(float64)
+	DNSStart             func(float64)
+	DNSDone              func(float64)
+	ConnectStart         func(float64)
+	ConnectDone          func(float64)
+	TLSHandshakeStart    func(float64)
+	TLSHandshakeDone     func(float64)
+	WroteHeaders         func(float64)
+	Wait100Continue      func(float64)
+	WroteRequest         func(float64)
+}
+
+// InstrumentRoundTripperTrace is a middleware that wraps the provided
+// RoundTripper and reports times to hook functions provided in the
+// InstrumentTrace struct. Hook functions that are not present in the provided
+// InstrumentTrace struct are ignored. Times reported to the hook functions are
+// time since the start of the request. Only with Go1.9+, those times are
+// guaranteed to never be negative. (Earlier Go versions are not using a
+// monotonic clock.) Note that partitioning of Histograms is expensive and
+// should be used judiciously.
+//
+// For hook functions that receive an error as an argument, no observations are
+// made in the event of a non-nil error value.
+//
+// See the example for ExampleInstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc {
+	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+		start := time.Now()
+
+		trace := &httptrace.ClientTrace{
+			GotConn: func(_ httptrace.GotConnInfo) {
+				if it.GotConn != nil {
+					it.GotConn(time.Since(start).Seconds())
+				}
+			},
+			PutIdleConn: func(err error) {
+				if err != nil {
+					return
+				}
+				if it.PutIdleConn != nil {
+					it.PutIdleConn(time.Since(start).Seconds())
+				}
+			},
+			DNSStart: func(_ httptrace.DNSStartInfo) {
+				if it.DNSStart != nil {
+					it.DNSStart(time.Since(start).Seconds())
+				}
+			},
+			DNSDone: func(_ httptrace.DNSDoneInfo) {
+				if it.DNSDone != nil {
+					it.DNSDone(time.Since(start).Seconds())
+				}
+			},
+			ConnectStart: func(_, _ string) {
+				if it.ConnectStart != nil {
+					it.ConnectStart(time.Since(start).Seconds())
+				}
+			},
+			ConnectDone: func(_, _ string, err error) {
+				if err != nil {
+					return
+				}
+				if it.ConnectDone != nil {
+					it.ConnectDone(time.Since(start).Seconds())
+				}
+			},
+			GotFirstResponseByte: func() {
+				if it.GotFirstResponseByte != nil {
+					it.GotFirstResponseByte(time.Since(start).Seconds())
+				}
+			},
+			Got100Continue: func() {
+				if it.Got100Continue != nil {
+					it.Got100Continue(time.Since(start).Seconds())
+				}
+			},
+			TLSHandshakeStart: func() {
+				if it.TLSHandshakeStart != nil {
+					it.TLSHandshakeStart(time.Since(start).Seconds())
+				}
+			},
+			TLSHandshakeDone: func(_ tls.ConnectionState, err error) {
+				if err != nil {
+					return
+				}
+				if it.TLSHandshakeDone != nil {
+					it.TLSHandshakeDone(time.Since(start).Seconds())
+				}
+			},
+			WroteHeaders: func() {
+				if it.WroteHeaders != nil {
+					it.WroteHeaders(time.Since(start).Seconds())
+				}
+			},
+			Wait100Continue: func() {
+				if it.Wait100Continue != nil {
+					it.Wait100Continue(time.Since(start).Seconds())
+				}
+			},
+			WroteRequest: func(_ httptrace.WroteRequestInfo) {
+				if it.WroteRequest != nil {
+					it.WroteRequest(time.Since(start).Seconds())
+				}
+			},
+		}
+		r = r.WithContext(httptrace.WithClientTrace(context.Background(), trace))
+
+		return next.RoundTrip(r)
+	})
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
new file mode 100644
index 00000000..9db24380
--- /dev/null
+++ 
b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go @@ -0,0 +1,447 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promhttp + +import ( + "errors" + "net/http" + "strconv" + "strings" + "time" + + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus" +) + +// magicString is used for the hacky label test in checkLabels. Remove once fixed. +const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa" + +// InstrumentHandlerInFlight is a middleware that wraps the provided +// http.Handler. It sets the provided prometheus.Gauge to the number of +// requests currently handled by the wrapped http.Handler. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + g.Inc() + defer g.Dec() + next.ServeHTTP(w, r) + }) +} + +// InstrumentHandlerDuration is a middleware that wraps the provided +// http.Handler to observe the request duration with the provided ObserverVec. +// The ObserverVec must have zero, one, or two non-const non-curried labels. For +// those, the only allowed label names are "code" and "method". The function +// panics otherwise. The Observe method of the Observer in the ObserverVec is +// called with the request duration in seconds. Partitioning happens by HTTP +// status code and/or HTTP method if the respective instance label names are +// present in the ObserverVec. For unpartitioned observations, use an +// ObserverVec with zero labels. Note that partitioning of Histograms is +// expensive and should be used judiciously. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(obs) + + if code { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + + obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds()) + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + next.ServeHTTP(w, r) + obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds()) + }) +} + +// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler +// to observe the request result with the provided CounterVec. The CounterVec +// must have zero, one, or two non-const non-curried labels. For those, the only +// allowed label names are "code" and "method". The function panics +// otherwise. 
Partitioning of the CounterVec happens by HTTP status code and/or +// HTTP method if the respective instance label names are present in the +// CounterVec. For unpartitioned counting, use a CounterVec with zero labels. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, the Counter is not incremented. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(counter) + + if code { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + counter.With(labels(code, method, r.Method, d.Status())).Inc() + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + next.ServeHTTP(w, r) + counter.With(labels(code, method, r.Method, 0)).Inc() + }) +} + +// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided +// http.Handler to observe with the provided ObserverVec the request duration +// until the response headers are written. The ObserverVec must have zero, one, +// or two non-const non-curried labels. For those, the only allowed label names +// are "code" and "method". The function panics otherwise. The Observe method of +// the Observer in the ObserverVec is called with the request duration in +// seconds. Partitioning happens by HTTP status code and/or HTTP method if the +// respective instance label names are present in the ObserverVec. For +// unpartitioned observations, use an ObserverVec with zero labels. Note that +// partitioning of Histograms is expensive and should be used judiciously. +// +// If the wrapped Handler panics before calling WriteHeader, no value is +// reported. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(obs) + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + d := newDelegator(w, func(status int) { + obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds()) + }) + next.ServeHTTP(d, r) + }) +} + +// InstrumentHandlerRequestSize is a middleware that wraps the provided +// http.Handler to observe the request size with the provided ObserverVec. The +// ObserverVec must have zero, one, or two non-const non-curried labels. For +// those, the only allowed label names are "code" and "method". The function +// panics otherwise. The Observe method of the Observer in the ObserverVec is +// called with the request size in bytes. Partitioning happens by HTTP status +// code and/or HTTP method if the respective instance label names are present in +// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero +// labels. Note that partitioning of Histograms is expensive and should be used +// judiciously. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. +// +// See the example for InstrumentHandlerDuration for example usage. 
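+//
+// A hypothetical sketch (requestSize is an assumed HistogramVec observing
+// bytes, pushHandler an arbitrary http.Handler):
+//
+//	http.Handle("/push", InstrumentHandlerRequestSize(requestSize, pushHandler))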
+func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(obs) + + if code { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + size := computeApproximateRequestSize(r) + obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size)) + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + next.ServeHTTP(w, r) + size := computeApproximateRequestSize(r) + obs.With(labels(code, method, r.Method, 0)).Observe(float64(size)) + }) +} + +// InstrumentHandlerResponseSize is a middleware that wraps the provided +// http.Handler to observe the response size with the provided ObserverVec. The +// ObserverVec must have zero, one, or two non-const non-curried labels. For +// those, the only allowed label names are "code" and "method". The function +// panics otherwise. The Observe method of the Observer in the ObserverVec is +// called with the response size in bytes. Partitioning happens by HTTP status +// code and/or HTTP method if the respective instance label names are present in +// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero +// labels. Note that partitioning of Histograms is expensive and should be used +// judiciously. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler { + code, method := checkLabels(obs) + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written())) + }) +} + +func checkLabels(c prometheus.Collector) (code bool, method bool) { + // TODO(beorn7): Remove this hacky way to check for instance labels + // once Descriptors can have their dimensionality queried. + var ( + desc *prometheus.Desc + m prometheus.Metric + pm dto.Metric + lvs []string + ) + + // Get the Desc from the Collector. + descc := make(chan *prometheus.Desc, 1) + c.Describe(descc) + + select { + case desc = <-descc: + default: + panic("no description provided by collector") + } + select { + case <-descc: + panic("more than one description provided by collector") + default: + } + + close(descc) + + // Create a ConstMetric with the Desc. Since we don't know how many + // variable labels there are, try for as long as it needs. + for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) { + m, err = prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, lvs...) + } + + // Write out the metric into a proto message and look at the labels. + // If the value is not the magicString, it is a constLabel, which doesn't interest us. + // If the label is curried, it doesn't interest us. + // In all other cases, only "code" or "method" is allowed. 
+	if err := m.Write(&pm); err != nil {
+		panic("error checking metric for labels")
+	}
+	for _, label := range pm.Label {
+		name, value := label.GetName(), label.GetValue()
+		if value != magicString || isLabelCurried(c, name) {
+			continue
+		}
+		switch name {
+		case "code":
+			code = true
+		case "method":
+			method = true
+		default:
+			panic("metric partitioned with non-supported labels")
+		}
+	}
+	return
+}
+
+func isLabelCurried(c prometheus.Collector, label string) bool {
+	// This is even hackier than the label test above.
+	// We essentially try to curry again and see if it works.
+	// But for that, we need to type-convert to the two
+	// types we use here, ObserverVec or *CounterVec.
+	switch v := c.(type) {
+	case *prometheus.CounterVec:
+		if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil {
+			return false
+		}
+	case prometheus.ObserverVec:
+		if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil {
+			return false
+		}
+	default:
+		panic("unsupported metric vec type")
+	}
+	return true
+}
+
+// emptyLabels is a one-time allocation for non-partitioned metrics to avoid
+// unnecessary allocations on each request.
+var emptyLabels = prometheus.Labels{}
+
+func labels(code, method bool, reqMethod string, status int) prometheus.Labels {
+	if !(code || method) {
+		return emptyLabels
+	}
+	labels := prometheus.Labels{}
+
+	if code {
+		labels["code"] = sanitizeCode(status)
+	}
+	if method {
+		labels["method"] = sanitizeMethod(reqMethod)
+	}
+
+	return labels
+}
+
+func computeApproximateRequestSize(r *http.Request) int {
+	s := 0
+	if r.URL != nil {
+		s += len(r.URL.String())
+	}
+
+	s += len(r.Method)
+	s += len(r.Proto)
+	for name, values := range r.Header {
+		s += len(name)
+		for _, value := range values {
+			s += len(value)
+		}
+	}
+	s += len(r.Host)
+
+	// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
+
+	if r.ContentLength != -1 {
+		s += int(r.ContentLength)
+	}
+	return s
+}
+
+func sanitizeMethod(m string) string {
+	switch m {
+	case "GET", "get":
+		return "get"
+	case "PUT", "put":
+		return "put"
+	case "HEAD", "head":
+		return "head"
+	case "POST", "post":
+		return "post"
+	case "DELETE", "delete":
+		return "delete"
+	case "CONNECT", "connect":
+		return "connect"
+	case "OPTIONS", "options":
+		return "options"
+	case "NOTIFY", "notify":
+		return "notify"
+	default:
+		return strings.ToLower(m)
+	}
+}
+
+// If the wrapped http.Handler has not set a status code, i.e. the value is
+// currently 0, sanitizeCode will return 200, for consistency with behavior in
+// the stdlib.
+func sanitizeCode(s int) string { + switch s { + case 100: + return "100" + case 101: + return "101" + + case 200, 0: + return "200" + case 201: + return "201" + case 202: + return "202" + case 203: + return "203" + case 204: + return "204" + case 205: + return "205" + case 206: + return "206" + + case 300: + return "300" + case 301: + return "301" + case 302: + return "302" + case 304: + return "304" + case 305: + return "305" + case 307: + return "307" + + case 400: + return "400" + case 401: + return "401" + case 402: + return "402" + case 403: + return "403" + case 404: + return "404" + case 405: + return "405" + case 406: + return "406" + case 407: + return "407" + case 408: + return "408" + case 409: + return "409" + case 410: + return "410" + case 411: + return "411" + case 412: + return "412" + case 413: + return "413" + case 414: + return "414" + case 415: + return "415" + case 416: + return "416" + case 417: + return "417" + case 418: + return "418" + + case 500: + return "500" + case 501: + return "501" + case 502: + return "502" + case 503: + return "503" + case 504: + return "504" + case 505: + return "505" + + case 428: + return "428" + case 429: + return "429" + case 431: + return "431" + case 511: + return "511" + + default: + return strconv.Itoa(s) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go new file mode 100644 index 00000000..bee37036 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -0,0 +1,807 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "bytes" + "errors" + "fmt" + "os" + "runtime" + "sort" + "sync" + "unicode/utf8" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +const ( + // Capacity for the channel to collect metrics and descriptors. + capMetricChan = 1000 + capDescChan = 10 +) + +// DefaultRegisterer and DefaultGatherer are the implementations of the +// Registerer and Gatherer interface a number of convenience functions in this +// package act on. Initially, both variables point to the same Registry, which +// has a process collector (currently on Linux only, see NewProcessCollector) +// and a Go collector (see NewGoCollector) already registered. This approach to +// keep default instances as global state mirrors the approach of other packages +// in the Go standard library. Note that there are caveats. Change the variables +// with caution and only if you understand the consequences. Users who want to +// avoid global state altogether should not use the convenience functions and +// act on custom instances instead. 
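+//
+// A minimal usage sketch of the default registry (the metric name and help
+// text below are made up for illustration):
+//
+//     requests := NewCounter(CounterOpts{
+//         Name: "myapp_requests_total",
+//         Help: "Total number of requests handled.",
+//     })
+//     MustRegister(requests) // registers with DefaultRegisterer
+//     requests.Inc()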
+var (
+	defaultRegistry              = NewRegistry()
+	DefaultRegisterer Registerer = defaultRegistry
+	DefaultGatherer   Gatherer   = defaultRegistry
+)
+
+func init() {
+	MustRegister(NewProcessCollector(os.Getpid(), ""))
+	MustRegister(NewGoCollector())
+}
+
+// NewRegistry creates a new vanilla Registry without any Collectors
+// pre-registered.
+func NewRegistry() *Registry {
+	return &Registry{
+		collectorsByID:  map[uint64]Collector{},
+		descIDs:         map[uint64]struct{}{},
+		dimHashesByName: map[string]uint64{},
+	}
+}
+
+// NewPedanticRegistry returns a registry that checks during collection if each
+// collected Metric is consistent with its reported Desc, and if the Desc has
+// actually been registered with the registry.
+//
+// Usually, a Registry will be happy as long as the union of all collected
+// Metrics is consistent and valid even if some metrics are not consistent with
+// their own Desc or a Desc provided by their registered Collector. Well-behaved
+// Collectors and Metrics will only provide consistent Descs. This Registry is
+// useful to test the implementation of Collectors and Metrics.
+func NewPedanticRegistry() *Registry {
+	r := NewRegistry()
+	r.pedanticChecksEnabled = true
+	return r
+}
+
+// Registerer is the interface for the part of a registry in charge of
+// registering and unregistering. Users of custom registries should use
+// Registerer as type for registration purposes (rather than the Registry type
+// directly). In that way, they are free to use custom Registerer
+// implementations (e.g. for testing purposes).
+type Registerer interface {
+	// Register registers a new Collector to be included in metrics
+	// collection. It returns an error if the descriptors provided by the
+	// Collector are invalid or if they — in combination with descriptors of
+	// already registered Collectors — do not fulfill the consistency and
+	// uniqueness criteria described in the documentation of metric.Desc.
+	//
+	// If the provided Collector is equal to a Collector already registered
+	// (which includes the case of re-registering the same Collector), the
+	// returned error is an instance of AlreadyRegisteredError, which
+	// contains the previously registered Collector.
+	//
+	// It is in general not safe to register the same Collector multiple
+	// times concurrently.
+	Register(Collector) error
+	// MustRegister works like Register but registers any number of
+	// Collectors and panics upon the first registration that causes an
+	// error.
+	MustRegister(...Collector)
+	// Unregister unregisters the Collector that equals the Collector passed
+	// in as an argument. (Two Collectors are considered equal if their
+	// Describe method yields the same set of descriptors.) The function
+	// returns whether a Collector was unregistered.
+	//
+	// Note that even after unregistering, it will not be possible to
+	// register a new Collector that is inconsistent with the unregistered
+	// Collector, e.g. a Collector collecting metrics with the same name but
+	// a different help string. The rationale here is that the same registry
+	// instance must only collect consistent metrics throughout its
+	// lifetime.
+	Unregister(Collector) bool
+}
+
+// Gatherer is the interface for the part of a registry in charge of gathering
+// the collected metrics into a number of MetricFamilies. The Gatherer interface
+// comes with the same general implication as described for the Registerer
+// interface.
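+//
+// A minimal gathering sketch (error handling shortened for illustration):
+//
+//     mfs, err := DefaultGatherer.Gather()
+//     if err != nil {
+//         // mfs may still contain partial results, see Gather below.
+//     }
+//     for _, mf := range mfs {
+//         fmt.Println(mf.GetName(), mf.GetType())
+//     }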
+type Gatherer interface {
+	// Gather calls the Collect method of the registered Collectors and then
+	// gathers the collected metrics into a lexicographically sorted slice
+	// of MetricFamily protobufs. Even if an error occurs, Gather attempts
+	// to gather as many metrics as possible. Hence, if a non-nil error is
+	// returned, the returned MetricFamily slice could be nil (in case of a
+	// fatal error that prevented any meaningful metric collection) or
+	// contain a number of MetricFamily protobufs, some of which might be
+	// incomplete, and some might be missing altogether. The returned error
+	// (which might be a MultiError) explains the details. In scenarios
+	// where complete collection is critical, the returned MetricFamily
+	// protobufs should be disregarded if the returned error is non-nil.
+	Gather() ([]*dto.MetricFamily, error)
+}
+
+// Register registers the provided Collector with the DefaultRegisterer.
+//
+// Register is a shortcut for DefaultRegisterer.Register(c). See there for more
+// details.
+func Register(c Collector) error {
+	return DefaultRegisterer.Register(c)
+}
+
+// MustRegister registers the provided Collectors with the DefaultRegisterer and
+// panics if any error occurs.
+//
+// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See
+// there for more details.
+func MustRegister(cs ...Collector) {
+	DefaultRegisterer.MustRegister(cs...)
+}
+
+// Unregister removes the registration of the provided Collector from the
+// DefaultRegisterer.
+//
+// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for
+// more details.
+func Unregister(c Collector) bool {
+	return DefaultRegisterer.Unregister(c)
+}
+
+// GathererFunc turns a function into a Gatherer.
+type GathererFunc func() ([]*dto.MetricFamily, error)
+
+// Gather implements Gatherer.
+func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) {
+	return gf()
+}
+
+// AlreadyRegisteredError is returned by the Register method if the Collector to
+// be registered has already been registered before, or a different Collector
+// that collects the same metrics has been registered before. Registration fails
+// in that case, but you can detect from the kind of error what has
+// happened. The error contains fields for the existing Collector and the
+// (rejected) new Collector that equals the existing one. This can be used to
+// find out if an equal Collector has been registered before and switch over to
+// using the old one, as demonstrated in the example.
+type AlreadyRegisteredError struct {
+	ExistingCollector, NewCollector Collector
+}
+
+func (err AlreadyRegisteredError) Error() string {
+	return "duplicate metrics collector registration attempted"
+}
+
+// MultiError is a slice of errors implementing the error interface. It is used
+// by a Gatherer to report multiple errors during MetricFamily gathering.
+type MultiError []error
+
+func (errs MultiError) Error() string {
+	if len(errs) == 0 {
+		return ""
+	}
+	buf := &bytes.Buffer{}
+	fmt.Fprintf(buf, "%d error(s) occurred:", len(errs))
+	for _, err := range errs {
+		fmt.Fprintf(buf, "\n* %s", err)
+	}
+	return buf.String()
+}
+
+// Append appends the provided error if it is not nil.
+func (errs *MultiError) Append(err error) {
+	if err != nil {
+		*errs = append(*errs, err)
+	}
+}
+
+// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only
+// contained error as error if len(errs) is 1. In all other cases, it returns
+// the MultiError directly.
This is helpful for returning a MultiError in a way +// that only uses the MultiError if needed. +func (errs MultiError) MaybeUnwrap() error { + switch len(errs) { + case 0: + return nil + case 1: + return errs[0] + default: + return errs + } +} + +// Registry registers Prometheus collectors, collects their metrics, and gathers +// them into MetricFamilies for exposition. It implements both Registerer and +// Gatherer. The zero value is not usable. Create instances with NewRegistry or +// NewPedanticRegistry. +type Registry struct { + mtx sync.RWMutex + collectorsByID map[uint64]Collector // ID is a hash of the descIDs. + descIDs map[uint64]struct{} + dimHashesByName map[string]uint64 + pedanticChecksEnabled bool +} + +// Register implements Registerer. +func (r *Registry) Register(c Collector) error { + var ( + descChan = make(chan *Desc, capDescChan) + newDescIDs = map[uint64]struct{}{} + newDimHashesByName = map[string]uint64{} + collectorID uint64 // Just a sum of all desc IDs. + duplicateDescErr error + ) + go func() { + c.Describe(descChan) + close(descChan) + }() + r.mtx.Lock() + defer r.mtx.Unlock() + // Conduct various tests... + for desc := range descChan { + + // Is the descriptor valid at all? + if desc.err != nil { + return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err) + } + + // Is the descID unique? + // (In other words: Is the fqName + constLabel combination unique?) + if _, exists := r.descIDs[desc.id]; exists { + duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc) + } + // If it is not a duplicate desc in this collector, add it to + // the collectorID. (We allow duplicate descs within the same + // collector, but their existence must be a no-op.) + if _, exists := newDescIDs[desc.id]; !exists { + newDescIDs[desc.id] = struct{}{} + collectorID += desc.id + } + + // Are all the label names and the help string consistent with + // previous descriptors of the same name? + // First check existing descriptors... + if dimHash, exists := r.dimHashesByName[desc.fqName]; exists { + if dimHash != desc.dimHash { + return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc) + } + } else { + // ...then check the new descriptors already seen. + if dimHash, exists := newDimHashesByName[desc.fqName]; exists { + if dimHash != desc.dimHash { + return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) + } + } else { + newDimHashesByName[desc.fqName] = desc.dimHash + } + } + } + // Did anything happen at all? + if len(newDescIDs) == 0 { + return errors.New("collector has no descriptors") + } + if existing, exists := r.collectorsByID[collectorID]; exists { + return AlreadyRegisteredError{ + ExistingCollector: existing, + NewCollector: c, + } + } + // If the collectorID is new, but at least one of the descs existed + // before, we are in trouble. + if duplicateDescErr != nil { + return duplicateDescErr + } + + // Only after all tests have passed, actually register. + r.collectorsByID[collectorID] = c + for hash := range newDescIDs { + r.descIDs[hash] = struct{}{} + } + for name, dimHash := range newDimHashesByName { + r.dimHashesByName[name] = dimHash + } + return nil +} + +// Unregister implements Registerer. 
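+//
+// A minimal sketch (goC is an arbitrary collector chosen for illustration):
+//
+//     goC := NewGoCollector()
+//     r := NewRegistry()
+//     r.MustRegister(goC)
+//     ok := r.Unregister(goC) // ok == true, goC was registered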
+func (r *Registry) Unregister(c Collector) bool {
+	var (
+		descChan    = make(chan *Desc, capDescChan)
+		descIDs     = map[uint64]struct{}{}
+		collectorID uint64 // Just a sum of the desc IDs.
+	)
+	go func() {
+		c.Describe(descChan)
+		close(descChan)
+	}()
+	for desc := range descChan {
+		if _, exists := descIDs[desc.id]; !exists {
+			collectorID += desc.id
+			descIDs[desc.id] = struct{}{}
+		}
+	}
+
+	r.mtx.RLock()
+	if _, exists := r.collectorsByID[collectorID]; !exists {
+		r.mtx.RUnlock()
+		return false
+	}
+	r.mtx.RUnlock()
+
+	r.mtx.Lock()
+	defer r.mtx.Unlock()
+
+	delete(r.collectorsByID, collectorID)
+	for id := range descIDs {
+		delete(r.descIDs, id)
+	}
+	// dimHashesByName is left untouched as those must be consistent
+	// throughout the lifetime of a program.
+	return true
+}
+
+// MustRegister implements Registerer.
+func (r *Registry) MustRegister(cs ...Collector) {
+	for _, c := range cs {
+		if err := r.Register(c); err != nil {
+			panic(err)
+		}
+	}
+}
+
+// Gather implements Gatherer.
+func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
+	var (
+		metricChan        = make(chan Metric, capMetricChan)
+		metricHashes      = map[uint64]struct{}{}
+		dimHashes         = map[string]uint64{}
+		wg                sync.WaitGroup
+		errs              MultiError          // The collected errors to return in the end.
+		registeredDescIDs map[uint64]struct{} // Only used for pedantic checks
+	)
+
+	r.mtx.RLock()
+	goroutineBudget := len(r.collectorsByID)
+	metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
+	collectors := make(chan Collector, len(r.collectorsByID))
+	for _, collector := range r.collectorsByID {
+		collectors <- collector
+	}
+	// In case pedantic checks are enabled, we have to copy the map before
+	// giving up the RLock.
+	if r.pedanticChecksEnabled {
+		registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs))
+		for id := range r.descIDs {
+			registeredDescIDs[id] = struct{}{}
+		}
+	}
+	r.mtx.RUnlock()
+
+	wg.Add(goroutineBudget)
+
+	collectWorker := func() {
+		for {
+			select {
+			case collector := <-collectors:
+				collector.Collect(metricChan)
+				wg.Done()
+			default:
+				return
+			}
+		}
+	}
+
+	// Start the first worker now to make sure at least one is running.
+	go collectWorker()
+	goroutineBudget--
+
+	// Close the metricChan once all collectors are collected.
+	go func() {
+		wg.Wait()
+		close(metricChan)
+	}()
+
+	// Drain metricChan in case of premature return.
+	defer func() {
+		for range metricChan {
+		}
+	}()
+
+collectLoop:
+	for {
+		select {
+		case metric, ok := <-metricChan:
+			if !ok {
+				// metricChan is closed, we are done.
+				break collectLoop
+			}
+			errs.Append(processMetric(
+				metric, metricFamiliesByName,
+				metricHashes, dimHashes,
+				registeredDescIDs,
+			))
+		default:
+			if goroutineBudget <= 0 || len(collectors) == 0 {
+				// All collectors are already being worked on or
+				// we have already as many goroutines started as
+				// there are collectors. Just process metrics
+				// from now on.
+				for metric := range metricChan {
+					errs.Append(processMetric(
+						metric, metricFamiliesByName,
+						metricHashes, dimHashes,
+						registeredDescIDs,
+					))
+				}
+				break collectLoop
+			}
+			// Start more workers.
+			go collectWorker()
+			goroutineBudget--
+			runtime.Gosched()
+		}
+	}
+	return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+}
+
+// processMetric is an internal helper method only used by the Gather method.
+func processMetric(
+	metric Metric,
+	metricFamiliesByName map[string]*dto.MetricFamily,
+	metricHashes map[uint64]struct{},
+	dimHashes map[string]uint64,
+	registeredDescIDs map[uint64]struct{},
+) error {
+	desc := metric.Desc()
+	dtoMetric := &dto.Metric{}
+	if err := metric.Write(dtoMetric); err != nil {
+		return fmt.Errorf("error collecting metric %v: %s", desc, err)
+	}
+	metricFamily, ok := metricFamiliesByName[desc.fqName]
+	if ok {
+		if metricFamily.GetHelp() != desc.help {
+			return fmt.Errorf(
+				"collected metric %s %s has help %q but should have %q",
+				desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(),
+			)
+		}
+		// TODO(beorn7): Simplify switch once Desc has type.
+		switch metricFamily.GetType() {
+		case dto.MetricType_COUNTER:
+			if dtoMetric.Counter == nil {
+				return fmt.Errorf(
+					"collected metric %s %s should be a Counter",
+					desc.fqName, dtoMetric,
+				)
+			}
+		case dto.MetricType_GAUGE:
+			if dtoMetric.Gauge == nil {
+				return fmt.Errorf(
+					"collected metric %s %s should be a Gauge",
+					desc.fqName, dtoMetric,
+				)
+			}
+		case dto.MetricType_SUMMARY:
+			if dtoMetric.Summary == nil {
+				return fmt.Errorf(
+					"collected metric %s %s should be a Summary",
+					desc.fqName, dtoMetric,
+				)
+			}
+		case dto.MetricType_UNTYPED:
+			if dtoMetric.Untyped == nil {
+				return fmt.Errorf(
+					"collected metric %s %s should be Untyped",
+					desc.fqName, dtoMetric,
+				)
+			}
+		case dto.MetricType_HISTOGRAM:
+			if dtoMetric.Histogram == nil {
+				return fmt.Errorf(
+					"collected metric %s %s should be a Histogram",
+					desc.fqName, dtoMetric,
+				)
+			}
+		default:
+			panic("encountered MetricFamily with invalid type")
+		}
+	} else {
+		metricFamily = &dto.MetricFamily{}
+		metricFamily.Name = proto.String(desc.fqName)
+		metricFamily.Help = proto.String(desc.help)
+		// TODO(beorn7): Simplify switch once Desc has type.
+		switch {
+		case dtoMetric.Gauge != nil:
+			metricFamily.Type = dto.MetricType_GAUGE.Enum()
+		case dtoMetric.Counter != nil:
+			metricFamily.Type = dto.MetricType_COUNTER.Enum()
+		case dtoMetric.Summary != nil:
+			metricFamily.Type = dto.MetricType_SUMMARY.Enum()
+		case dtoMetric.Untyped != nil:
+			metricFamily.Type = dto.MetricType_UNTYPED.Enum()
+		case dtoMetric.Histogram != nil:
+			metricFamily.Type = dto.MetricType_HISTOGRAM.Enum()
+		default:
+			return fmt.Errorf("empty metric collected: %s", dtoMetric)
+		}
+		metricFamiliesByName[desc.fqName] = metricFamily
+	}
+	if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes, dimHashes); err != nil {
+		return err
+	}
+	if registeredDescIDs != nil {
+		// Is the desc registered at all?
+		if _, exist := registeredDescIDs[desc.id]; !exist {
+			return fmt.Errorf(
+				"collected metric %s %s with unregistered descriptor %s",
+				metricFamily.GetName(), dtoMetric, desc,
+			)
+		}
+		if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil {
+			return err
+		}
+	}
+	metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
+	return nil
+}
+
+// Gatherers is a slice of Gatherer instances that implements the Gatherer
+// interface itself. Its Gather method calls Gather on all Gatherers in the
+// slice in order and returns the merged results. Errors returned from the
+// Gather calls are all returned in a flattened MultiError. Duplicate and
+// inconsistent Metrics are skipped (first occurrence in slice order wins) and
+// reported in the returned error.
+//
+// Gatherers can be used to merge the Gather results from multiple
+// Registries.
It also provides a way to directly inject existing MetricFamily +// protobufs into the gathering by creating a custom Gatherer with a Gather +// method that simply returns the existing MetricFamily protobufs. Note that no +// registration is involved (in contrast to Collector registration), so +// obviously registration-time checks cannot happen. Any inconsistencies between +// the gathered MetricFamilies are reported as errors by the Gather method, and +// inconsistent Metrics are dropped. Invalid parts of the MetricFamilies +// (e.g. syntactically invalid metric or label names) will go undetected. +type Gatherers []Gatherer + +// Gather implements Gatherer. +func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { + var ( + metricFamiliesByName = map[string]*dto.MetricFamily{} + metricHashes = map[uint64]struct{}{} + dimHashes = map[string]uint64{} + errs MultiError // The collected errors to return in the end. + ) + + for i, g := range gs { + mfs, err := g.Gather() + if err != nil { + if multiErr, ok := err.(MultiError); ok { + for _, err := range multiErr { + errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) + } + } else { + errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) + } + } + for _, mf := range mfs { + existingMF, exists := metricFamiliesByName[mf.GetName()] + if exists { + if existingMF.GetHelp() != mf.GetHelp() { + errs = append(errs, fmt.Errorf( + "gathered metric family %s has help %q but should have %q", + mf.GetName(), mf.GetHelp(), existingMF.GetHelp(), + )) + continue + } + if existingMF.GetType() != mf.GetType() { + errs = append(errs, fmt.Errorf( + "gathered metric family %s has type %s but should have %s", + mf.GetName(), mf.GetType(), existingMF.GetType(), + )) + continue + } + } else { + existingMF = &dto.MetricFamily{} + existingMF.Name = mf.Name + existingMF.Help = mf.Help + existingMF.Type = mf.Type + metricFamiliesByName[mf.GetName()] = existingMF + } + for _, m := range mf.Metric { + if err := checkMetricConsistency(existingMF, m, metricHashes, dimHashes); err != nil { + errs = append(errs, err) + continue + } + existingMF.Metric = append(existingMF.Metric, m) + } + } + } + return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() +} + +// metricSorter is a sortable slice of *dto.Metric. +type metricSorter []*dto.Metric + +func (s metricSorter) Len() int { + return len(s) +} + +func (s metricSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s metricSorter) Less(i, j int) bool { + if len(s[i].Label) != len(s[j].Label) { + // This should not happen. The metrics are + // inconsistent. However, we have to deal with the fact, as + // people might use custom collectors or metric family injection + // to create inconsistent metrics. So let's simply compare the + // number of labels in this case. That will still yield + // reproducible sorting. + return len(s[i].Label) < len(s[j].Label) + } + for n, lp := range s[i].Label { + vi := lp.GetValue() + vj := s[j].Label[n].GetValue() + if vi != vj { + return vi < vj + } + } + + // We should never arrive here. Multiple metrics with the same + // label set in the same scrape will lead to undefined ingestion + // behavior. However, as above, we have to provide stable sorting + // here, even for inconsistent metrics. So sort equal metrics + // by their timestamp, with missing timestamps (implying "now") + // coming last. 
+	if s[i].TimestampMs == nil {
+		return false
+	}
+	if s[j].TimestampMs == nil {
+		return true
+	}
+	return s[i].GetTimestampMs() < s[j].GetTimestampMs()
+}
+
+// normalizeMetricFamilies returns a MetricFamily slice with empty
+// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
+// the slice, with the contained Metrics sorted within each MetricFamily.
+func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
+	for _, mf := range metricFamiliesByName {
+		sort.Sort(metricSorter(mf.Metric))
+	}
+	names := make([]string, 0, len(metricFamiliesByName))
+	for name, mf := range metricFamiliesByName {
+		if len(mf.Metric) > 0 {
+			names = append(names, name)
+		}
+	}
+	sort.Strings(names)
+	result := make([]*dto.MetricFamily, 0, len(names))
+	for _, name := range names {
+		result = append(result, metricFamiliesByName[name])
+	}
+	return result
+}
+
+// checkMetricConsistency checks if the provided Metric is consistent with the
+// provided MetricFamily. It also hashes the Metric labels and the MetricFamily
+// name. If the resulting hash is already in the provided metricHashes, an error
+// is returned. If not, it is added to metricHashes. The provided dimHashes maps
+// MetricFamily names to their dimHash (hashed sorted label names). If dimHashes
+// doesn't yet contain a hash for the provided MetricFamily, it is
+// added. Otherwise, an error is returned if the existing dimHash is not equal
+// to the calculated dimHash.
+func checkMetricConsistency(
+	metricFamily *dto.MetricFamily,
+	dtoMetric *dto.Metric,
+	metricHashes map[uint64]struct{},
+	dimHashes map[string]uint64,
+) error {
+	// Type consistency with metric family.
+	if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
+		metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
+		metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil ||
+		metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
+		metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
+		return fmt.Errorf(
+			"collected metric %s %s is not a %s",
+			metricFamily.GetName(), dtoMetric, metricFamily.GetType(),
+		)
+	}
+
+	for _, labelPair := range dtoMetric.GetLabel() {
+		if !utf8.ValidString(*labelPair.Value) {
+			return fmt.Errorf("collected metric's label %s is not utf8: %#v", *labelPair.Name, *labelPair.Value)
+		}
+	}
+
+	// Is the metric unique (i.e. no other metric with the same name and the same label values)?
+	h := hashNew()
+	h = hashAdd(h, metricFamily.GetName())
+	h = hashAddByte(h, separatorByte)
+	dh := hashNew()
+	// Make sure label pairs are sorted. We depend on it for the consistency
+	// check.
+ sort.Sort(LabelPairSorter(dtoMetric.Label)) + for _, lp := range dtoMetric.Label { + h = hashAdd(h, lp.GetValue()) + h = hashAddByte(h, separatorByte) + dh = hashAdd(dh, lp.GetName()) + dh = hashAddByte(dh, separatorByte) + } + if _, exists := metricHashes[h]; exists { + return fmt.Errorf( + "collected metric %s %s was collected before with the same name and label values", + metricFamily.GetName(), dtoMetric, + ) + } + if dimHash, ok := dimHashes[metricFamily.GetName()]; ok { + if dimHash != dh { + return fmt.Errorf( + "collected metric %s %s has label dimensions inconsistent with previously collected metrics in the same metric family", + metricFamily.GetName(), dtoMetric, + ) + } + } else { + dimHashes[metricFamily.GetName()] = dh + } + metricHashes[h] = struct{}{} + return nil +} + +func checkDescConsistency( + metricFamily *dto.MetricFamily, + dtoMetric *dto.Metric, + desc *Desc, +) error { + // Desc help consistency with metric family help. + if metricFamily.GetHelp() != desc.help { + return fmt.Errorf( + "collected metric %s %s has help %q but should have %q", + metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help, + ) + } + + // Is the desc consistent with the content of the metric? + lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label)) + lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...) + for _, l := range desc.variableLabels { + lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ + Name: proto.String(l), + }) + } + if len(lpsFromDesc) != len(dtoMetric.Label) { + return fmt.Errorf( + "labels in collected metric %s %s are inconsistent with descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + sort.Sort(LabelPairSorter(lpsFromDesc)) + for i, lpFromDesc := range lpsFromDesc { + lpFromMetric := dtoMetric.Label[i] + if lpFromDesc.GetName() != lpFromMetric.GetName() || + lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() { + return fmt.Errorf( + "labels in collected metric %s %s are inconsistent with descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + } + return nil +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go new file mode 100644 index 00000000..f7dc85b9 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -0,0 +1,609 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "math" + "sort" + "sync" + "time" + + "github.com/beorn7/perks/quantile" + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// quantileLabel is used for the label that defines the quantile in a +// summary. +const quantileLabel = "quantile" + +// A Summary captures individual observations from an event or sample stream and +// summarizes them in a manner similar to traditional summary statistics: 1. sum +// of observations, 2. observation count, 3. rank estimations. 
+//
+// A typical use-case is the observation of request latencies. By default, a
+// Summary provides the median, the 90th and the 99th percentile of the latency
+// as rank estimations. However, the default behavior will change in the
+// upcoming v0.10 of the library. There will be no rank estimations at all by
+// default. For a sane transition, it is recommended to set the desired rank
+// estimations explicitly.
+//
+// Note that the rank estimations cannot be aggregated in a meaningful way with
+// the Prometheus query language (i.e. you cannot average or add them). If you
+// need aggregatable quantiles (e.g. you want the 99th percentile latency of all
+// queries served across all instances of a service), consider the Histogram
+// metric type. See the Prometheus documentation for more details.
+//
+// To create Summary instances, use NewSummary.
+type Summary interface {
+	Metric
+	Collector
+
+	// Observe adds a single observation to the summary.
+	Observe(float64)
+}
+
+// DefObjectives are the default Summary quantile values.
+//
+// Deprecated: DefObjectives will not be used as the default objectives in
+// v0.10 of the library. The default Summary will have no quantiles then.
+var (
+	DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
+
+	errQuantileLabelNotAllowed = fmt.Errorf(
+		"%q is not allowed as label name in summaries", quantileLabel,
+	)
+)
+
+// Default values for SummaryOpts.
+const (
+	// DefMaxAge is the default duration for which observations stay
+	// relevant.
+	DefMaxAge time.Duration = 10 * time.Minute
+	// DefAgeBuckets is the default number of buckets used to calculate the
+	// age of observations.
+	DefAgeBuckets = 5
+	// DefBufCap is the standard buffer size for collecting Summary observations.
+	DefBufCap = 500
+)
+
+// SummaryOpts bundles the options for creating a Summary metric. It is
+// mandatory to set Name and Help to a non-empty string. While all other fields
+// are optional and can safely be left at their zero value, it is recommended to
+// explicitly set the Objectives field to the desired value as the default value
+// will change in the upcoming v0.10 of the library.
+type SummaryOpts struct {
+	// Namespace, Subsystem, and Name are components of the fully-qualified
+	// name of the Summary (created by joining these components with
+	// "_"). Only Name is mandatory, the others merely help structuring the
+	// name. Note that the fully-qualified name of the Summary must be a
+	// valid Prometheus metric name.
+	Namespace string
+	Subsystem string
+	Name      string
+
+	// Help provides information about this Summary. Mandatory!
+	//
+	// Metrics with the same fully-qualified name must have the same Help
+	// string.
+	Help string
+
+	// ConstLabels are used to attach fixed labels to this metric. Metrics
+	// with the same fully-qualified name must have the same label names in
+	// their ConstLabels.
+	//
+	// ConstLabels are only used rarely. In particular, do not use them to
+	// attach the same labels to all your metrics. Those use cases are
+	// better covered by target labels set by the scraping Prometheus
+	// server, or by one specific metric (e.g. a build_info or a
+	// machine_role metric). See also
+	// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
+	ConstLabels Labels
+
+	// Objectives defines the quantile rank estimates with their respective
+	// absolute error.
If Objectives[q] = e, then the value reported for q
+	// will be the φ-quantile value for some φ between q-e and q+e. The
+	// default value is DefObjectives. It is used if Objectives is left at
+	// its zero value (i.e. nil). To create a Summary without Objectives,
+	// set it to an empty map (i.e. map[float64]float64{}).
+	//
+	// Deprecated: Note that the current value of DefObjectives is
+	// deprecated. It will be replaced by an empty map in v0.10 of the
+	// library. Please explicitly set Objectives to the desired value.
+	Objectives map[float64]float64
+
+	// MaxAge defines the duration for which an observation stays relevant
+	// for the summary. Must be positive. The default value is DefMaxAge.
+	MaxAge time.Duration
+
+	// AgeBuckets is the number of buckets used to exclude observations that
+	// are older than MaxAge from the summary. A higher number has a
+	// resource penalty, so only increase it if the higher resolution is
+	// really required. For very high observation rates, you might want to
+	// reduce the number of age buckets. With only one age bucket, you will
+	// effectively see a complete reset of the summary each time MaxAge has
+	// passed. The default value is DefAgeBuckets.
+	AgeBuckets uint32
+
+	// BufCap defines the default sample stream buffer size. The default
+	// value of DefBufCap should suffice for most uses. If there is a need
+	// to increase the value, a multiple of 500 is recommended (because that
+	// is the internal buffer size of the underlying package
+	// "github.com/bmizerany/perks/quantile").
+	BufCap uint32
+}
+
+// Great fuck-up with the sliding-window decay algorithm... The Merge method of
+// perks/quantile is actually not working as advertised - and it might be
+// unfixable, as the underlying algorithm is apparently not capable of merging
+// summaries in the first place. To avoid using Merge, we are currently adding
+// observations to _each_ age bucket, i.e. the effort to add a sample is
+// essentially multiplied by the number of age buckets. When rotating age
+// buckets, we empty the previous head stream. On scrape time, we simply take
+// the quantiles from the head stream (no merging required). Result: More effort
+// on observation time, less effort on scrape time, which is exactly the
+// opposite of what we try to accomplish, but at least the results are correct.
+//
+// The quite elegant previous contraption to merge the age buckets efficiently
+// on scrape time (see code up to commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0)
+// can't be used anymore.
+
+// NewSummary creates a new Summary based on the provided SummaryOpts.
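+//
+// A minimal usage sketch (metric name, help text, and values below are
+// illustrative; Objectives is set explicitly, as recommended above):
+//
+//     latency := NewSummary(SummaryOpts{
+//         Name:       "myapp_request_duration_seconds",
+//         Help:       "Request latency distribution.",
+//         Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+//     })
+//     MustRegister(latency)
+//     latency.Observe(0.42)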
+func NewSummary(opts SummaryOpts) Summary { + return newSummary( + NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), + opts, + ) +} + +func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { + if len(desc.variableLabels) != len(labelValues) { + panic(errInconsistentCardinality) + } + + for _, n := range desc.variableLabels { + if n == quantileLabel { + panic(errQuantileLabelNotAllowed) + } + } + for _, lp := range desc.constLabelPairs { + if lp.GetName() == quantileLabel { + panic(errQuantileLabelNotAllowed) + } + } + + if opts.Objectives == nil { + opts.Objectives = DefObjectives + } + + if opts.MaxAge < 0 { + panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge)) + } + if opts.MaxAge == 0 { + opts.MaxAge = DefMaxAge + } + + if opts.AgeBuckets == 0 { + opts.AgeBuckets = DefAgeBuckets + } + + if opts.BufCap == 0 { + opts.BufCap = DefBufCap + } + + s := &summary{ + desc: desc, + + objectives: opts.Objectives, + sortedObjectives: make([]float64, 0, len(opts.Objectives)), + + labelPairs: makeLabelPairs(desc, labelValues), + + hotBuf: make([]float64, 0, opts.BufCap), + coldBuf: make([]float64, 0, opts.BufCap), + streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets), + } + s.headStreamExpTime = time.Now().Add(s.streamDuration) + s.hotBufExpTime = s.headStreamExpTime + + for i := uint32(0); i < opts.AgeBuckets; i++ { + s.streams = append(s.streams, s.newStream()) + } + s.headStream = s.streams[0] + + for qu := range s.objectives { + s.sortedObjectives = append(s.sortedObjectives, qu) + } + sort.Float64s(s.sortedObjectives) + + s.init(s) // Init self-collection. + return s +} + +type summary struct { + selfCollector + + bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime. + mtx sync.Mutex // Protects every other moving part. + // Lock bufMtx before mtx if both are needed. + + desc *Desc + + objectives map[float64]float64 + sortedObjectives []float64 + + labelPairs []*dto.LabelPair + + sum float64 + cnt uint64 + + hotBuf, coldBuf []float64 + + streams []*quantile.Stream + streamDuration time.Duration + headStream *quantile.Stream + headStreamIdx int + headStreamExpTime, hotBufExpTime time.Time +} + +func (s *summary) Desc() *Desc { + return s.desc +} + +func (s *summary) Observe(v float64) { + s.bufMtx.Lock() + defer s.bufMtx.Unlock() + + now := time.Now() + if now.After(s.hotBufExpTime) { + s.asyncFlush(now) + } + s.hotBuf = append(s.hotBuf, v) + if len(s.hotBuf) == cap(s.hotBuf) { + s.asyncFlush(now) + } +} + +func (s *summary) Write(out *dto.Metric) error { + sum := &dto.Summary{} + qs := make([]*dto.Quantile, 0, len(s.objectives)) + + s.bufMtx.Lock() + s.mtx.Lock() + // Swap bufs even if hotBuf is empty to set new hotBufExpTime. + s.swapBufs(time.Now()) + s.bufMtx.Unlock() + + s.flushColdBuf() + sum.SampleCount = proto.Uint64(s.cnt) + sum.SampleSum = proto.Float64(s.sum) + + for _, rank := range s.sortedObjectives { + var q float64 + if s.headStream.Count() == 0 { + q = math.NaN() + } else { + q = s.headStream.Query(rank) + } + qs = append(qs, &dto.Quantile{ + Quantile: proto.Float64(rank), + Value: proto.Float64(q), + }) + } + + s.mtx.Unlock() + + if len(qs) > 0 { + sort.Sort(quantSort(qs)) + } + sum.Quantile = qs + + out.Summary = sum + out.Label = s.labelPairs + return nil +} + +func (s *summary) newStream() *quantile.Stream { + return quantile.NewTargeted(s.objectives) +} + +// asyncFlush needs bufMtx locked. 
+func (s *summary) asyncFlush(now time.Time) {
+	s.mtx.Lock()
+	s.swapBufs(now)
+
+	// Unblock the original goroutine that was responsible for the mutation
+	// that triggered the compaction. But hold onto the global non-buffer
+	// state mutex until the operation finishes.
+	go func() {
+		s.flushColdBuf()
+		s.mtx.Unlock()
+	}()
+}
+
+// maybeRotateStreams needs mtx AND bufMtx locked.
+func (s *summary) maybeRotateStreams() {
+	for !s.hotBufExpTime.Equal(s.headStreamExpTime) {
+		s.headStream.Reset()
+		s.headStreamIdx++
+		if s.headStreamIdx >= len(s.streams) {
+			s.headStreamIdx = 0
+		}
+		s.headStream = s.streams[s.headStreamIdx]
+		s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration)
+	}
+}
+
+// flushColdBuf needs mtx locked.
+func (s *summary) flushColdBuf() {
+	for _, v := range s.coldBuf {
+		for _, stream := range s.streams {
+			stream.Insert(v)
+		}
+		s.cnt++
+		s.sum += v
+	}
+	s.coldBuf = s.coldBuf[0:0]
+	s.maybeRotateStreams()
+}
+
+// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty.
+func (s *summary) swapBufs(now time.Time) {
+	if len(s.coldBuf) != 0 {
+		panic("coldBuf is not empty")
+	}
+	s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf
+	// hotBuf is now empty and gets new expiration set.
+	for now.After(s.hotBufExpTime) {
+		s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration)
+	}
+}
+
+type quantSort []*dto.Quantile
+
+func (s quantSort) Len() int {
+	return len(s)
+}
+
+func (s quantSort) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func (s quantSort) Less(i, j int) bool {
+	return s[i].GetQuantile() < s[j].GetQuantile()
+}
+
+// SummaryVec is a Collector that bundles a set of Summaries that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. HTTP request latencies, partitioned by status code and method). Create
+// instances with NewSummaryVec.
+type SummaryVec struct {
+	*metricVec
+}
+
+// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
+// partitioned by the given label names.
+func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
+	desc := NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		labelNames,
+		opts.ConstLabels,
+	)
+	return &SummaryVec{
+		metricVec: newMetricVec(desc, func(lvs ...string) Metric {
+			return newSummary(desc, opts, lvs...)
+		}),
+	}
+}
+
+// GetMetricWithLabelValues returns the Summary for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Summary is created.
+//
+// It is possible to call this method without using the returned Summary to only
+// create the new Summary but leave it at its starting value, a Summary without
+// any observations.
+//
+// Keeping the Summary for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Summary from the SummaryVec. In that case,
+// the Summary will still exist, but it will not be exported anymore, even if a
+// Summary with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments.
Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
+	metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
+	if metric != nil {
+		return metric.(Observer), err
+	}
+	return nil, err
+}
+
+// GetMetricWith returns the Summary for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Summary is created. Implications of
+// creating a Summary without using it and keeping the Summary for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
+	metric, err := v.metricVec.getMetricWith(labels)
+	if metric != nil {
+		return metric.(Observer), err
+	}
+	return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
+//     myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (v *SummaryVec) WithLabelValues(lvs ...string) Observer {
+	s, err := v.GetMetricWithLabelValues(lvs...)
+	if err != nil {
+		panic(err)
+	}
+	return s
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. Not returning an error allows shortcuts like
+//     myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (v *SummaryVec) With(labels Labels) Observer {
+	s, err := v.GetMetricWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return s
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the SummaryVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) {
+	vec, err := v.curryWith(labels)
+	if vec != nil {
+		return &SummaryVec{vec}, err
+	}
+	return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
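+//
+// A minimal currying sketch (vector name and label values are illustrative):
+//
+//     latency := NewSummaryVec(SummaryOpts{
+//         Name:       "myapp_request_duration_seconds",
+//         Help:       "Request latency distribution.",
+//         Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+//     }, []string{"method", "path"})
+//     getOnly := latency.MustCurryWith(Labels{"method": "GET"})
+//     getOnly.WithLabelValues("/index").Observe(0.07)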
+func (v *SummaryVec) MustCurryWith(labels Labels) ObserverVec {
+	vec, err := v.CurryWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return vec
+}
+
+type constSummary struct {
+	desc       *Desc
+	count      uint64
+	sum        float64
+	quantiles  map[float64]float64
+	labelPairs []*dto.LabelPair
+}
+
+func (s *constSummary) Desc() *Desc {
+	return s.desc
+}
+
+func (s *constSummary) Write(out *dto.Metric) error {
+	sum := &dto.Summary{}
+	qs := make([]*dto.Quantile, 0, len(s.quantiles))
+
+	sum.SampleCount = proto.Uint64(s.count)
+	sum.SampleSum = proto.Float64(s.sum)
+
+	for rank, q := range s.quantiles {
+		qs = append(qs, &dto.Quantile{
+			Quantile: proto.Float64(rank),
+			Value:    proto.Float64(q),
+		})
+	}
+
+	if len(qs) > 0 {
+		sort.Sort(quantSort(qs))
+	}
+	sum.Quantile = qs
+
+	out.Summary = sum
+	out.Label = s.labelPairs
+
+	return nil
+}
+
+// NewConstSummary returns a metric representing a Prometheus summary with fixed
+// values for the count, sum, and quantiles. As those parameters cannot be
+// changed, the returned value does not implement the Summary interface (but
+// only the Metric interface). Users of this package will not have much use for
+// it in regular operations. However, when implementing custom Collectors, it is
+// useful as a throw-away metric that is generated on the fly to send it to
+// Prometheus in the Collect method.
+//
+// quantiles maps ranks to quantile values. For example, a median latency of
+// 0.23s and a 99th percentile latency of 0.56s would be expressed as:
+//     map[float64]float64{0.5: 0.23, 0.99: 0.56}
+//
+// NewConstSummary returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc.
+func NewConstSummary(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	quantiles map[float64]float64,
+	labelValues ...string,
+) (Metric, error) {
+	if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+		return nil, err
+	}
+	return &constSummary{
+		desc:       desc,
+		count:      count,
+		sum:        sum,
+		quantiles:  quantiles,
+		labelPairs: makeLabelPairs(desc, labelValues),
+	}, nil
+}
+
+// MustNewConstSummary is a version of NewConstSummary that panics where
+// NewConstSummary would have returned an error.
+func MustNewConstSummary(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	quantiles map[float64]float64,
+	labelValues ...string,
+) Metric {
+	m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...)
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
new file mode 100644
index 00000000..b8fc5f18
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
@@ -0,0 +1,51 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "time"
+
+// Timer is a helper type to time functions. Use NewTimer to create new
+// instances.
+type Timer struct { + begin time.Time + observer Observer +} + +// NewTimer creates a new Timer. The provided Observer is used to observe a +// duration in seconds. Timer is usually used to time a function call in the +// following way: +// func TimeMe() { +// timer := NewTimer(myHistogram) +// defer timer.ObserveDuration() +// // Do actual work. +// } +func NewTimer(o Observer) *Timer { + return &Timer{ + begin: time.Now(), + observer: o, + } +} + +// ObserveDuration records the duration passed since the Timer was created with +// NewTimer. It calls the Observe method of the Observer provided during +// construction with the duration in seconds as an argument. ObserveDuration is +// usually called with a defer statement. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +func (t *Timer) ObserveDuration() { + if t.observer != nil { + t.observer.Observe(time.Since(t.begin).Seconds()) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go new file mode 100644 index 00000000..0f9ce63f --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go @@ -0,0 +1,42 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// UntypedOpts is an alias for Opts. See there for doc comments. +type UntypedOpts Opts + +// UntypedFunc works like GaugeFunc but the collected metric is of type +// "Untyped". UntypedFunc is useful to mirror an external metric of unknown +// type. +// +// To create UntypedFunc instances, use NewUntypedFunc. +type UntypedFunc interface { + Metric + Collector +} + +// NewUntypedFunc creates a new UntypedFunc based on the provided +// UntypedOpts. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where an UntypedFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. +func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), UntypedValue, function) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go new file mode 100644 index 00000000..543b57c2 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go @@ -0,0 +1,160 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "sort" + + dto "github.com/prometheus/client_model/go" + + "github.com/golang/protobuf/proto" +) + +// ValueType is an enumeration of metric types that represent a simple value. +type ValueType int + +// Possible values for the ValueType enum. +const ( + _ ValueType = iota + CounterValue + GaugeValue + UntypedValue +) + +// valueFunc is a generic metric for simple values retrieved on collect time +// from a function. It implements Metric and Collector. Its effective type is +// determined by ValueType. This is a low-level building block used by the +// library to back the implementations of CounterFunc, GaugeFunc, and +// UntypedFunc. +type valueFunc struct { + selfCollector + + desc *Desc + valType ValueType + function func() float64 + labelPairs []*dto.LabelPair +} + +// newValueFunc returns a newly allocated valueFunc with the given Desc and +// ValueType. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where a valueFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. +func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc { + result := &valueFunc{ + desc: desc, + valType: valueType, + function: function, + labelPairs: makeLabelPairs(desc, nil), + } + result.init(result) + return result +} + +func (v *valueFunc) Desc() *Desc { + return v.desc +} + +func (v *valueFunc) Write(out *dto.Metric) error { + return populateMetric(v.valType, v.function(), v.labelPairs, out) +} + +// NewConstMetric returns a metric with one fixed value that cannot be +// changed. Users of this package will not have much use for it in regular +// operations. However, when implementing custom Collectors, it is useful as a +// throw-away metric that is generated on the fly to send it to Prometheus in +// the Collect method. NewConstMetric returns an error if the length of +// labelValues is not consistent with the variable labels in Desc. +func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) { + if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + return nil, err + } + return &constMetric{ + desc: desc, + valType: valueType, + val: value, + labelPairs: makeLabelPairs(desc, labelValues), + }, nil +} + +// MustNewConstMetric is a version of NewConstMetric that panics where +// NewConstMetric would have returned an error. +func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric { + m, err := NewConstMetric(desc, valueType, value, labelValues...) 
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
+
+type constMetric struct {
+	desc       *Desc
+	valType    ValueType
+	val        float64
+	labelPairs []*dto.LabelPair
+}
+
+func (m *constMetric) Desc() *Desc {
+	return m.desc
+}
+
+func (m *constMetric) Write(out *dto.Metric) error {
+	return populateMetric(m.valType, m.val, m.labelPairs, out)
+}
+
+func populateMetric(
+	t ValueType,
+	v float64,
+	labelPairs []*dto.LabelPair,
+	m *dto.Metric,
+) error {
+	m.Label = labelPairs
+	switch t {
+	case CounterValue:
+		m.Counter = &dto.Counter{Value: proto.Float64(v)}
+	case GaugeValue:
+		m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
+	case UntypedValue:
+		m.Untyped = &dto.Untyped{Value: proto.Float64(v)}
+	default:
+		return fmt.Errorf("encountered unknown type %v", t)
+	}
+	return nil
+}
+
+func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
+	totalLen := len(desc.variableLabels) + len(desc.constLabelPairs)
+	if totalLen == 0 {
+		// Super fast path.
+		return nil
+	}
+	if len(desc.variableLabels) == 0 {
+		// Moderately fast path.
+		return desc.constLabelPairs
+	}
+	labelPairs := make([]*dto.LabelPair, 0, totalLen)
+	for i, n := range desc.variableLabels {
+		labelPairs = append(labelPairs, &dto.LabelPair{
+			Name:  proto.String(n),
+			Value: proto.String(labelValues[i]),
+		})
+	}
+	for _, lp := range desc.constLabelPairs {
+		labelPairs = append(labelPairs, lp)
+	}
+	sort.Sort(LabelPairSorter(labelPairs))
+	return labelPairs
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
new file mode 100644
index 00000000..cea15824
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
@@ -0,0 +1,469 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/prometheus/common/model"
+)
+
+// metricVec is a Collector to bundle metrics of the same name that differ in
+// their label values. metricVec is not used directly (and therefore
+// unexported). It is used as a building block for implementations of vectors of
+// a given metric type, like GaugeVec, CounterVec, SummaryVec, and HistogramVec.
+// It also handles label currying. It uses metricMap internally.
+type metricVec struct {
+	*metricMap
+
+	curry []curriedLabelValue
+
+	// hashAdd and hashAddByte can be replaced for testing collision handling.
+	hashAdd     func(h uint64, s string) uint64
+	hashAddByte func(h uint64, b byte) uint64
+}
+
+// newMetricVec returns an initialized metricVec.
+func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *metricVec {
+	return &metricVec{
+		metricMap: &metricMap{
+			metrics:   map[uint64][]metricWithLabelValues{},
+			desc:      desc,
+			newMetric: newMetric,
+		},
+		hashAdd:     hashAdd,
+		hashAddByte: hashAddByte,
+	}
+}
+
+// DeleteLabelValues removes the metric where the variable labels are the same
+// as those passed in as labels (same order as the VariableLabels in Desc).
It +// returns true if a metric was deleted. +// +// It is not an error if the number of label values is not the same as the +// number of VariableLabels in Desc. However, such inconsistent label count can +// never match an actual metric, so the method will always return false in that +// case. +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider Delete(Labels) as an +// alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the CounterVec example. +func (m *metricVec) DeleteLabelValues(lvs ...string) bool { + h, err := m.hashLabelValues(lvs) + if err != nil { + return false + } + + return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry) +} + +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. However, such inconsistent Labels +// can never match an actual metric, so the method will always return false in +// that case. +// +// This method is used for the same purpose as DeleteLabelValues(...string). See +// there for pros and cons of the two methods. +func (m *metricVec) Delete(labels Labels) bool { + h, err := m.hashLabels(labels) + if err != nil { + return false + } + + return m.metricMap.deleteByHashWithLabels(h, labels, m.curry) +} + +func (m *metricVec) curryWith(labels Labels) (*metricVec, error) { + var ( + newCurry []curriedLabelValue + oldCurry = m.curry + iCurry int + ) + for i, label := range m.desc.variableLabels { + val, ok := labels[label] + if iCurry < len(oldCurry) && oldCurry[iCurry].index == i { + if ok { + return nil, fmt.Errorf("label name %q is already curried", label) + } + newCurry = append(newCurry, oldCurry[iCurry]) + iCurry++ + } else { + if !ok { + continue // Label stays uncurried. 
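+				// An uncurried label remains to be supplied later,
+				// at the exported level e.g. via a vector's
+				// GetMetricWithLabelValues or GetMetricWith.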
+ } + newCurry = append(newCurry, curriedLabelValue{i, val}) + } + } + if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 { + return nil, fmt.Errorf("%d unknown label(s) found during currying", l) + } + + return &metricVec{ + metricMap: m.metricMap, + curry: newCurry, + hashAdd: m.hashAdd, + hashAddByte: m.hashAddByte, + }, nil +} + +func (m *metricVec) getMetricWithLabelValues(lvs ...string) (Metric, error) { + h, err := m.hashLabelValues(lvs) + if err != nil { + return nil, err + } + + return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil +} + +func (m *metricVec) getMetricWith(labels Labels) (Metric, error) { + h, err := m.hashLabels(labels) + if err != nil { + return nil, err + } + + return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil +} + +func (m *metricVec) hashLabelValues(vals []string) (uint64, error) { + if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil { + return 0, err + } + + var ( + h = hashNew() + curry = m.curry + iVals, iCurry int + ) + for i := 0; i < len(m.desc.variableLabels); i++ { + if iCurry < len(curry) && curry[iCurry].index == i { + h = m.hashAdd(h, curry[iCurry].value) + iCurry++ + } else { + h = m.hashAdd(h, vals[iVals]) + iVals++ + } + h = m.hashAddByte(h, model.SeparatorByte) + } + return h, nil +} + +func (m *metricVec) hashLabels(labels Labels) (uint64, error) { + if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil { + return 0, err + } + + var ( + h = hashNew() + curry = m.curry + iCurry int + ) + for i, label := range m.desc.variableLabels { + val, ok := labels[label] + if iCurry < len(curry) && curry[iCurry].index == i { + if ok { + return 0, fmt.Errorf("label name %q is already curried", label) + } + h = m.hashAdd(h, curry[iCurry].value) + iCurry++ + } else { + if !ok { + return 0, fmt.Errorf("label name %q missing in label map", label) + } + h = m.hashAdd(h, val) + } + h = m.hashAddByte(h, model.SeparatorByte) + } + return h, nil +} + +// metricWithLabelValues provides the metric and its label values for +// disambiguation on hash collision. +type metricWithLabelValues struct { + values []string + metric Metric +} + +// curriedLabelValue sets the curried value for a label at the given index. +type curriedLabelValue struct { + index int + value string +} + +// metricMap is a helper for metricVec and shared between differently curried +// metricVecs. +type metricMap struct { + mtx sync.RWMutex // Protects metrics. + metrics map[uint64][]metricWithLabelValues + desc *Desc + newMetric func(labelValues ...string) Metric +} + +// Describe implements Collector. It will send exactly one Desc to the provided +// channel. +func (m *metricMap) Describe(ch chan<- *Desc) { + ch <- m.desc +} + +// Collect implements Collector. +func (m *metricMap) Collect(ch chan<- Metric) { + m.mtx.RLock() + defer m.mtx.RUnlock() + + for _, metrics := range m.metrics { + for _, metric := range metrics { + ch <- metric.metric + } + } +} + +// Reset deletes all metrics in this vector. +func (m *metricMap) Reset() { + m.mtx.Lock() + defer m.mtx.Unlock() + + for h := range m.metrics { + delete(m.metrics, h) + } +} + +// deleteByHashWithLabelValues removes the metric from the hash bucket h. If +// there are multiple matches in the bucket, use lvs to select a metric and +// remove only that metric. 
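+// The metricMap's write lock is held for the duration of the call.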
+func (m *metricMap) deleteByHashWithLabelValues(
+	h uint64, lvs []string, curry []curriedLabelValue,
+) bool {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	metrics, ok := m.metrics[h]
+	if !ok {
+		return false
+	}
+
+	i := findMetricWithLabelValues(metrics, lvs, curry)
+	if i >= len(metrics) {
+		return false
+	}
+
+	if len(metrics) > 1 {
+		m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
+	} else {
+		delete(m.metrics, h)
+	}
+	return true
+}
+
+// deleteByHashWithLabels removes the metric from the hash bucket h. If there
+// are multiple matches in the bucket, use lvs to select a metric and remove
+// only that metric.
+func (m *metricMap) deleteByHashWithLabels(
+	h uint64, labels Labels, curry []curriedLabelValue,
+) bool {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	metrics, ok := m.metrics[h]
+	if !ok {
+		return false
+	}
+	i := findMetricWithLabels(m.desc, metrics, labels, curry)
+	if i >= len(metrics) {
+		return false
+	}
+
+	if len(metrics) > 1 {
+		m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
+	} else {
+		delete(m.metrics, h)
+	}
+	return true
+}
+
+// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
+// or creates it and returns the new one.
+//
+// This function holds the mutex.
+func (m *metricMap) getOrCreateMetricWithLabelValues(
+	hash uint64, lvs []string, curry []curriedLabelValue,
+) Metric {
+	m.mtx.RLock()
+	metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs, curry)
+	m.mtx.RUnlock()
+	if ok {
+		return metric
+	}
+
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs, curry)
+	if !ok {
+		inlinedLVs := inlineLabelValues(lvs, curry)
+		metric = m.newMetric(inlinedLVs...)
+		m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: inlinedLVs, metric: metric})
+	}
+	return metric
+}
+
+// getOrCreateMetricWithLabels retrieves the metric by hash and label value
+// or creates it and returns the new one.
+//
+// This function holds the mutex.
+func (m *metricMap) getOrCreateMetricWithLabels(
+	hash uint64, labels Labels, curry []curriedLabelValue,
+) Metric {
+	m.mtx.RLock()
+	metric, ok := m.getMetricWithHashAndLabels(hash, labels, curry)
+	m.mtx.RUnlock()
+	if ok {
+		return metric
+	}
+
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	metric, ok = m.getMetricWithHashAndLabels(hash, labels, curry)
+	if !ok {
+		lvs := extractLabelValues(m.desc, labels, curry)
+		metric = m.newMetric(lvs...)
+		m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: lvs, metric: metric})
+	}
+	return metric
+}
+
+// getMetricWithHashAndLabelValues gets a metric while handling possible
+// collisions in the hash space. Must be called while holding the read mutex.
+func (m *metricMap) getMetricWithHashAndLabelValues(
+	h uint64, lvs []string, curry []curriedLabelValue,
+) (Metric, bool) {
+	metrics, ok := m.metrics[h]
+	if ok {
+		if i := findMetricWithLabelValues(metrics, lvs, curry); i < len(metrics) {
+			return metrics[i].metric, true
+		}
+	}
+	return nil, false
+}
+
+// getMetricWithHashAndLabels gets a metric while handling possible collisions in
+// the hash space. Must be called while holding the read mutex.
+func (m *metricMap) getMetricWithHashAndLabels(
+	h uint64, labels Labels, curry []curriedLabelValue,
+) (Metric, bool) {
+	metrics, ok := m.metrics[h]
+	if ok {
+		if i := findMetricWithLabels(m.desc, metrics, labels, curry); i < len(metrics) {
+			return metrics[i].metric, true
+		}
+	}
+	return nil, false
+}
+
+// findMetricWithLabelValues returns the index of the matching metric or
+// len(metrics) if not found.
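+// Hash collisions are expected to be rare, so the bucket scanned below
+// almost always holds at most one entry.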
+func findMetricWithLabelValues( + metrics []metricWithLabelValues, lvs []string, curry []curriedLabelValue, +) int { + for i, metric := range metrics { + if matchLabelValues(metric.values, lvs, curry) { + return i + } + } + return len(metrics) +} + +// findMetricWithLabels returns the index of the matching metric or len(metrics) +// if not found. +func findMetricWithLabels( + desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue, +) int { + for i, metric := range metrics { + if matchLabels(desc, metric.values, labels, curry) { + return i + } + } + return len(metrics) +} + +func matchLabelValues(values []string, lvs []string, curry []curriedLabelValue) bool { + if len(values) != len(lvs)+len(curry) { + return false + } + var iLVs, iCurry int + for i, v := range values { + if iCurry < len(curry) && curry[iCurry].index == i { + if v != curry[iCurry].value { + return false + } + iCurry++ + continue + } + if v != lvs[iLVs] { + return false + } + iLVs++ + } + return true +} + +func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool { + if len(values) != len(labels)+len(curry) { + return false + } + iCurry := 0 + for i, k := range desc.variableLabels { + if iCurry < len(curry) && curry[iCurry].index == i { + if values[i] != curry[iCurry].value { + return false + } + iCurry++ + continue + } + if values[i] != labels[k] { + return false + } + } + return true +} + +func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string { + labelValues := make([]string, len(labels)+len(curry)) + iCurry := 0 + for i, k := range desc.variableLabels { + if iCurry < len(curry) && curry[iCurry].index == i { + labelValues[i] = curry[iCurry].value + iCurry++ + continue + } + labelValues[i] = labels[k] + } + return labelValues +} + +func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string { + labelValues := make([]string, len(lvs)+len(curry)) + var iCurry, iLVs int + for i := range labelValues { + if iCurry < len(curry) && curry[iCurry].index == i { + labelValues[i] = curry[iCurry].value + iCurry++ + continue + } + labelValues[i] = lvs[iLVs] + iLVs++ + } + return labelValues +} diff --git a/vendor/github.com/prometheus/client_model/LICENSE b/vendor/github.com/prometheus/client_model/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/prometheus/client_model/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/prometheus/client_model/NOTICE b/vendor/github.com/prometheus/client_model/NOTICE new file mode 100644 index 00000000..20110e41 --- /dev/null +++ b/vendor/github.com/prometheus/client_model/NOTICE @@ -0,0 +1,5 @@ +Data model artifacts for Prometheus. +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. 
(http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/client_model/README.md b/vendor/github.com/prometheus/client_model/README.md
new file mode 100644
index 00000000..a710042d
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/README.md
@@ -0,0 +1,26 @@
+# Background
+Under most circumstances, manually downloading this repository should never
+be required.
+
+# Prerequisites
+## Base
+* [Google Protocol Buffers](https://developers.google.com/protocol-buffers)
+
+## Java
+* [Apache Maven](http://maven.apache.org)
+* [Prometheus Maven Repository](https://github.com/prometheus/io.prometheus-maven-repository) checked out into ../io.prometheus-maven-repository
+
+## Go
+* [Go](http://golang.org)
+* [goprotobuf](https://code.google.com/p/goprotobuf)
+
+## Ruby
+* [Ruby](https://www.ruby-lang.org)
+* [bundler](https://rubygems.org/gems/bundler)
+
+# Building
+    $ make
+
+# Getting Started
+  * The Go source code is periodically indexed: [Go Protocol Buffer Model](http://godoc.org/github.com/prometheus/client_model/go).
+  * All of the core developers are accessible via the [Prometheus Developers Mailing List](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
new file mode 100644
index 00000000..b065f868
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
@@ -0,0 +1,364 @@
+// Code generated by protoc-gen-go.
+// source: metrics.proto
+// DO NOT EDIT!
+
+/*
+Package io_prometheus_client is a generated protocol buffer package.
+
+It is generated from these files:
+	metrics.proto
+
+It has these top-level messages:
+	LabelPair
+	Gauge
+	Counter
+	Quantile
+	Summary
+	Untyped
+	Histogram
+	Bucket
+	Metric
+	MetricFamily
+*/
+package io_prometheus_client
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
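+// The blank assignments below keep the proto and math imports referenced
+// even where the rest of the generated code does not use them. Note also
+// that the generated getters further down are nil-safe: called on a nil
+// message, they return the zero value.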
+var _ = proto.Marshal +var _ = math.Inf + +type MetricType int32 + +const ( + MetricType_COUNTER MetricType = 0 + MetricType_GAUGE MetricType = 1 + MetricType_SUMMARY MetricType = 2 + MetricType_UNTYPED MetricType = 3 + MetricType_HISTOGRAM MetricType = 4 +) + +var MetricType_name = map[int32]string{ + 0: "COUNTER", + 1: "GAUGE", + 2: "SUMMARY", + 3: "UNTYPED", + 4: "HISTOGRAM", +} +var MetricType_value = map[string]int32{ + "COUNTER": 0, + "GAUGE": 1, + "SUMMARY": 2, + "UNTYPED": 3, + "HISTOGRAM": 4, +} + +func (x MetricType) Enum() *MetricType { + p := new(MetricType) + *p = x + return p +} +func (x MetricType) String() string { + return proto.EnumName(MetricType_name, int32(x)) +} +func (x *MetricType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType") + if err != nil { + return err + } + *x = MetricType(value) + return nil +} + +type LabelPair struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LabelPair) Reset() { *m = LabelPair{} } +func (m *LabelPair) String() string { return proto.CompactTextString(m) } +func (*LabelPair) ProtoMessage() {} + +func (m *LabelPair) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *LabelPair) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +type Gauge struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Gauge) Reset() { *m = Gauge{} } +func (m *Gauge) String() string { return proto.CompactTextString(m) } +func (*Gauge) ProtoMessage() {} + +func (m *Gauge) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Counter struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Counter) Reset() { *m = Counter{} } +func (m *Counter) String() string { return proto.CompactTextString(m) } +func (*Counter) ProtoMessage() {} + +func (m *Counter) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Quantile struct { + Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` + Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Quantile) Reset() { *m = Quantile{} } +func (m *Quantile) String() string { return proto.CompactTextString(m) } +func (*Quantile) ProtoMessage() {} + +func (m *Quantile) GetQuantile() float64 { + if m != nil && m.Quantile != nil { + return *m.Quantile + } + return 0 +} + +func (m *Quantile) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Summary struct { + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"` + Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Summary) Reset() { *m = Summary{} } +func (m *Summary) String() string { return proto.CompactTextString(m) } +func (*Summary) ProtoMessage() {} + +func (m *Summary) GetSampleCount() uint64 { + if m != nil && m.SampleCount != nil { + return *m.SampleCount + 
} + return 0 +} + +func (m *Summary) GetSampleSum() float64 { + if m != nil && m.SampleSum != nil { + return *m.SampleSum + } + return 0 +} + +func (m *Summary) GetQuantile() []*Quantile { + if m != nil { + return m.Quantile + } + return nil +} + +type Untyped struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Untyped) Reset() { *m = Untyped{} } +func (m *Untyped) String() string { return proto.CompactTextString(m) } +func (*Untyped) ProtoMessage() {} + +func (m *Untyped) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Histogram struct { + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"` + Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Histogram) Reset() { *m = Histogram{} } +func (m *Histogram) String() string { return proto.CompactTextString(m) } +func (*Histogram) ProtoMessage() {} + +func (m *Histogram) GetSampleCount() uint64 { + if m != nil && m.SampleCount != nil { + return *m.SampleCount + } + return 0 +} + +func (m *Histogram) GetSampleSum() float64 { + if m != nil && m.SampleSum != nil { + return *m.SampleSum + } + return 0 +} + +func (m *Histogram) GetBucket() []*Bucket { + if m != nil { + return m.Bucket + } + return nil +} + +type Bucket struct { + CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count" json:"cumulative_count,omitempty"` + UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound" json:"upper_bound,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Bucket) Reset() { *m = Bucket{} } +func (m *Bucket) String() string { return proto.CompactTextString(m) } +func (*Bucket) ProtoMessage() {} + +func (m *Bucket) GetCumulativeCount() uint64 { + if m != nil && m.CumulativeCount != nil { + return *m.CumulativeCount + } + return 0 +} + +func (m *Bucket) GetUpperBound() float64 { + if m != nil && m.UpperBound != nil { + return *m.UpperBound + } + return 0 +} + +type Metric struct { + Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` + Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` + Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` + Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` + Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` + TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms" json:"timestamp_ms,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Metric) Reset() { *m = Metric{} } +func (m *Metric) String() string { return proto.CompactTextString(m) } +func (*Metric) ProtoMessage() {} + +func (m *Metric) GetLabel() []*LabelPair { + if m != nil { + return m.Label + } + return nil +} + +func (m *Metric) GetGauge() *Gauge { + if m != nil { + return m.Gauge + } + return nil +} + +func (m *Metric) GetCounter() *Counter { + if m != nil { + return m.Counter + } + return nil +} + +func (m *Metric) GetSummary() *Summary { + if m != nil { + return m.Summary + } + return nil +} + +func (m *Metric) GetUntyped() *Untyped { + if m != nil { + return m.Untyped + } + return nil +} + +func (m *Metric) GetHistogram() *Histogram { + if m != nil { + 
return m.Histogram + } + return nil +} + +func (m *Metric) GetTimestampMs() int64 { + if m != nil && m.TimestampMs != nil { + return *m.TimestampMs + } + return 0 +} + +type MetricFamily struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` + Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` + Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MetricFamily) Reset() { *m = MetricFamily{} } +func (m *MetricFamily) String() string { return proto.CompactTextString(m) } +func (*MetricFamily) ProtoMessage() {} + +func (m *MetricFamily) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MetricFamily) GetHelp() string { + if m != nil && m.Help != nil { + return *m.Help + } + return "" +} + +func (m *MetricFamily) GetType() MetricType { + if m != nil && m.Type != nil { + return *m.Type + } + return MetricType_COUNTER +} + +func (m *MetricFamily) GetMetric() []*Metric { + if m != nil { + return m.Metric + } + return nil +} + +func init() { + proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) +} diff --git a/vendor/github.com/prometheus/common/LICENSE b/vendor/github.com/prometheus/common/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/prometheus/common/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/prometheus/common/NOTICE b/vendor/github.com/prometheus/common/NOTICE new file mode 100644 index 00000000..636a2c1a --- /dev/null +++ b/vendor/github.com/prometheus/common/NOTICE @@ -0,0 +1,5 @@ +Common libraries shared by Prometheus Go components. +Copyright 2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/common/README.md b/vendor/github.com/prometheus/common/README.md new file mode 100644 index 00000000..11a58494 --- /dev/null +++ b/vendor/github.com/prometheus/common/README.md @@ -0,0 +1,12 @@ +# Common +[![Build Status](https://travis-ci.org/prometheus/common.svg)](https://travis-ci.org/prometheus/common) + +This repository contains Go libraries that are shared across Prometheus +components and libraries. 
+
+* **config**: Common configuration structures
+* **expfmt**: Decoding and encoding for the exposition format
+* **log**: A logging wrapper around [logrus](https://github.com/sirupsen/logrus)
+* **model**: Shared data structures
+* **route**: A routing wrapper around [httprouter](https://github.com/julienschmidt/httprouter) using `context.Context`
+* **version**: Version information and metrics
diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go
new file mode 100644
index 00000000..a7a42d5e
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/decode.go
@@ -0,0 +1,429 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+	"fmt"
+	"io"
+	"math"
+	"mime"
+	"net/http"
+
+	dto "github.com/prometheus/client_model/go"
+
+	"github.com/matttproud/golang_protobuf_extensions/pbutil"
+	"github.com/prometheus/common/model"
+)
+
+// Decoder types decode an input stream into metric families.
+type Decoder interface {
+	Decode(*dto.MetricFamily) error
+}
+
+// DecodeOptions contains options used by the Decoder and in sample extraction.
+type DecodeOptions struct {
+	// Timestamp is added to each value from the stream that has no explicit timestamp set.
+	Timestamp model.Time
+}
+
+// ResponseFormat extracts the correct format from an HTTP response header.
+// If no matching format can be found, FmtUnknown is returned.
+func ResponseFormat(h http.Header) Format {
+	ct := h.Get(hdrContentType)
+
+	mediatype, params, err := mime.ParseMediaType(ct)
+	if err != nil {
+		return FmtUnknown
+	}
+
+	const textType = "text/plain"
+
+	switch mediatype {
+	case ProtoType:
+		if p, ok := params["proto"]; ok && p != ProtoProtocol {
+			return FmtUnknown
+		}
+		if e, ok := params["encoding"]; ok && e != "delimited" {
+			return FmtUnknown
+		}
+		return FmtProtoDelim
+
+	case textType:
+		if v, ok := params["version"]; ok && v != TextVersion {
+			return FmtUnknown
+		}
+		return FmtText
+	}
+
+	return FmtUnknown
+}
+
+// NewDecoder returns a new decoder based on the given input format.
+// If the input format does not imply otherwise, a text format decoder is returned.
+func NewDecoder(r io.Reader, format Format) Decoder {
+	switch format {
+	case FmtProtoDelim:
+		return &protoDecoder{r: r}
+	}
+	return &textDecoder{r: r}
+}
+
+// protoDecoder implements the Decoder interface for protocol buffers.
+type protoDecoder struct {
+	r io.Reader
+}
+
+// Decode implements the Decoder interface.
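+// Beyond reading one length-delimited MetricFamily message from the stream,
+// it rejects families with an invalid metric name and metrics with invalid
+// label names or label values.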
+func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
+	_, err := pbutil.ReadDelimited(d.r, v)
+	if err != nil {
+		return err
+	}
+	if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
+		return fmt.Errorf("invalid metric name %q", v.GetName())
+	}
+	for _, m := range v.GetMetric() {
+		if m == nil {
+			continue
+		}
+		for _, l := range m.GetLabel() {
+			if l == nil {
+				continue
+			}
+			if !model.LabelValue(l.GetValue()).IsValid() {
+				return fmt.Errorf("invalid label value %q", l.GetValue())
+			}
+			if !model.LabelName(l.GetName()).IsValid() {
+				return fmt.Errorf("invalid label name %q", l.GetName())
+			}
+		}
+	}
+	return nil
+}
+
+// textDecoder implements the Decoder interface for the text protocol.
+type textDecoder struct {
+	r    io.Reader
+	p    TextParser
+	fams []*dto.MetricFamily
+}
+
+// Decode implements the Decoder interface.
+func (d *textDecoder) Decode(v *dto.MetricFamily) error {
+	// TODO(fabxc): Wrap this as a line reader to make streaming safer.
+	if len(d.fams) == 0 {
+		// No cached metric families, read everything and parse metrics.
+		fams, err := d.p.TextToMetricFamilies(d.r)
+		if err != nil {
+			return err
+		}
+		if len(fams) == 0 {
+			return io.EOF
+		}
+		d.fams = make([]*dto.MetricFamily, 0, len(fams))
+		for _, f := range fams {
+			d.fams = append(d.fams, f)
+		}
+	}
+
+	*v = *d.fams[0]
+	d.fams = d.fams[1:]
+
+	return nil
+}
+
+// SampleDecoder wraps a Decoder to extract samples from the metric families
+// decoded by the wrapped Decoder.
+type SampleDecoder struct {
+	Dec  Decoder
+	Opts *DecodeOptions
+
+	f dto.MetricFamily
+}
+
+// Decode calls the Decode method of the wrapped Decoder and then extracts the
+// samples from the decoded MetricFamily into the provided model.Vector.
+func (sd *SampleDecoder) Decode(s *model.Vector) error {
+	err := sd.Dec.Decode(&sd.f)
+	if err != nil {
+		return err
+	}
+	*s, err = extractSamples(&sd.f, sd.Opts)
+	return err
+}
+
+// ExtractSamples builds a slice of samples from the provided metric
+// families. If an error occurs during sample extraction, it continues to
+// extract from the remaining metric families. The returned error is the last
+// error that has occurred.
+func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) {
+	var (
+		all     model.Vector
+		lastErr error
+	)
+	for _, f := range fams {
+		some, err := extractSamples(f, o)
+		if err != nil {
+			lastErr = err
+			continue
+		}
+		all = append(all, some...)
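+		// Samples extracted so far are kept even if a later family
+		// fails; only the most recent error is reported to the caller.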
+ } + return all, lastErr +} + +func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) { + switch f.GetType() { + case dto.MetricType_COUNTER: + return extractCounter(o, f), nil + case dto.MetricType_GAUGE: + return extractGauge(o, f), nil + case dto.MetricType_SUMMARY: + return extractSummary(o, f), nil + case dto.MetricType_UNTYPED: + return extractUntyped(o, f), nil + case dto.MetricType_HISTOGRAM: + return extractHistogram(o, f), nil + } + return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType()) +} + +func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Counter == nil { + continue + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + smpl := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Counter.GetValue()), + } + + if m.TimestampMs != nil { + smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } else { + smpl.Timestamp = o.Timestamp + } + + samples = append(samples, smpl) + } + + return samples +} + +func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Gauge == nil { + continue + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + smpl := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Gauge.GetValue()), + } + + if m.TimestampMs != nil { + smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } else { + smpl.Timestamp = o.Timestamp + } + + samples = append(samples, smpl) + } + + return samples +} + +func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Untyped == nil { + continue + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + smpl := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Untyped.GetValue()), + } + + if m.TimestampMs != nil { + smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } else { + smpl.Timestamp = o.Timestamp + } + + samples = append(samples, smpl) + } + + return samples +} + +func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Summary == nil { + continue + } + + timestamp := o.Timestamp + if m.TimestampMs != nil { + timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } + + for _, q := range m.Summary.Quantile { + lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + // BUG(matt): Update other names to "quantile". 
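+			// Each quantile becomes one sample carrying the "quantile"
+			// label; the _sum and _count samples follow below.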
+ lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile())) + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(q.GetValue()), + Timestamp: timestamp, + }) + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Summary.GetSampleSum()), + Timestamp: timestamp, + }) + + lset = make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Summary.GetSampleCount()), + Timestamp: timestamp, + }) + } + + return samples +} + +func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Histogram == nil { + continue + } + + timestamp := o.Timestamp + if m.TimestampMs != nil { + timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } + + infSeen := false + + for _, q := range m.Histogram.Bucket { + lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound())) + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") + + if math.IsInf(q.GetUpperBound(), +1) { + infSeen = true + } + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(q.GetCumulativeCount()), + Timestamp: timestamp, + }) + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Histogram.GetSampleSum()), + Timestamp: timestamp, + }) + + lset = make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") + + count := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Histogram.GetSampleCount()), + Timestamp: timestamp, + } + samples = append(samples, count) + + if !infSeen { + // Append an infinity bucket sample. 
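+			// The exposition format requires a +Inf bucket whose
+			// cumulative count equals the overall sample count, so
+			// synthesize it from the _count sample created above.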
+ lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf") + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: count.Value, + Timestamp: timestamp, + }) + } + } + + return samples +} diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go new file mode 100644 index 00000000..11839ed6 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -0,0 +1,88 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "fmt" + "io" + "net/http" + + "github.com/golang/protobuf/proto" + "github.com/matttproud/golang_protobuf_extensions/pbutil" + "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" + + dto "github.com/prometheus/client_model/go" +) + +// Encoder types encode metric families into an underlying wire protocol. +type Encoder interface { + Encode(*dto.MetricFamily) error +} + +type encoder func(*dto.MetricFamily) error + +func (e encoder) Encode(v *dto.MetricFamily) error { + return e(v) +} + +// Negotiate returns the Content-Type based on the given Accept header. +// If no appropriate accepted type is found, FmtText is returned. +func Negotiate(h http.Header) Format { + for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { + // Check for protocol buffer + if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { + switch ac.Params["encoding"] { + case "delimited": + return FmtProtoDelim + case "text": + return FmtProtoText + case "compact-text": + return FmtProtoCompact + } + } + // Check for text format. + ver := ac.Params["version"] + if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { + return FmtText + } + } + return FmtText +} + +// NewEncoder returns a new encoder based on content type negotiation. 
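+// A typical server-side sketch (the handler variables w and req and the
+// metricFamilies slice are illustrative only):
+//
+//	format := expfmt.Negotiate(req.Header)
+//	w.Header().Set("Content-Type", string(format))
+//	enc := expfmt.NewEncoder(w, format)
+//	for _, mf := range metricFamilies {
+//		if err := enc.Encode(mf); err != nil {
+//			break
+//		}
+//	}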
+func NewEncoder(w io.Writer, format Format) Encoder { + switch format { + case FmtProtoDelim: + return encoder(func(v *dto.MetricFamily) error { + _, err := pbutil.WriteDelimited(w, v) + return err + }) + case FmtProtoCompact: + return encoder(func(v *dto.MetricFamily) error { + _, err := fmt.Fprintln(w, v.String()) + return err + }) + case FmtProtoText: + return encoder(func(v *dto.MetricFamily) error { + _, err := fmt.Fprintln(w, proto.MarshalTextString(v)) + return err + }) + case FmtText: + return encoder(func(v *dto.MetricFamily) error { + _, err := MetricFamilyToText(w, v) + return err + }) + } + panic("expfmt.NewEncoder: unknown format") +} diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go new file mode 100644 index 00000000..371ac750 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -0,0 +1,38 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package expfmt contains tools for reading and writing Prometheus metrics. +package expfmt + +// Format specifies the HTTP content type of the different wire protocols. +type Format string + +// Constants to assemble the Content-Type values for the different wire protocols. +const ( + TextVersion = "0.0.4" + ProtoType = `application/vnd.google.protobuf` + ProtoProtocol = `io.prometheus.client.MetricFamily` + ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + + // The Content-Type values for the different wire protocols. + FmtUnknown Format = `` + FmtText Format = `text/plain; version=` + TextVersion + FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` + FmtProtoText Format = ProtoFmt + ` encoding=text` + FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` +) + +const ( + hdrContentType = "Content-Type" + hdrAccept = "Accept" +) diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go new file mode 100644 index 00000000..dc2eedee --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go @@ -0,0 +1,36 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+// Build only when actually fuzzing
+// +build gofuzz
+
+package expfmt
+
+import "bytes"
+
+// Fuzz text metric parser with github.com/dvyukov/go-fuzz:
+//
+//     go-fuzz-build github.com/prometheus/common/expfmt
+//     go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
+//
+// Further input samples should go in the folder fuzz/corpus.
+func Fuzz(in []byte) int {
+	parser := TextParser{}
+	_, err := parser.TextToMetricFamilies(bytes.NewReader(in))
+
+	if err != nil {
+		return 0
+	}
+
+	return 1
+}
diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go
new file mode 100644
index 00000000..f11321cd
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/text_create.go
@@ -0,0 +1,303 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+	"fmt"
+	"io"
+	"math"
+	"strings"
+
+	dto "github.com/prometheus/client_model/go"
+	"github.com/prometheus/common/model"
+)
+
+// MetricFamilyToText converts a MetricFamily proto message into text format and
+// writes the resulting lines to 'out'. It returns the number of bytes written
+// and any error encountered. The output will have the same order as the input;
+// no further sorting is performed. Furthermore, this function assumes the input
+// is already sanitized and does not perform any sanity checks. If the input
+// contains duplicate metrics or invalid metric or label names, the conversion
+// will result in invalid text format output.
+//
+// This method fulfills the type 'prometheus.encoder'.
+func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
+	var written int
+
+	// Fail-fast checks.
+	if len(in.Metric) == 0 {
+		return written, fmt.Errorf("MetricFamily has no metrics: %s", in)
+	}
+	name := in.GetName()
+	if name == "" {
+		return written, fmt.Errorf("MetricFamily has no name: %s", in)
+	}
+
+	// Comments, first HELP, then TYPE.
+	if in.Help != nil {
+		n, err := fmt.Fprintf(
+			out, "# HELP %s %s\n",
+			name, escapeString(*in.Help, false),
+		)
+		written += n
+		if err != nil {
+			return written, err
+		}
+	}
+	metricType := in.GetType()
+	n, err := fmt.Fprintf(
+		out, "# TYPE %s %s\n",
+		name, strings.ToLower(metricType.String()),
+	)
+	written += n
+	if err != nil {
+		return written, err
+	}
+
+	// Finally the samples, one line for each.
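+	// Each iteration emits one line per sample, e.g. (with a made-up
+	// counter) `http_requests_total{code="200"} 1027`, or several lines
+	// for the quantiles/buckets plus _sum and _count of a summary or
+	// histogram.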
+ for _, metric := range in.Metric { + switch metricType { + case dto.MetricType_COUNTER: + if metric.Counter == nil { + return written, fmt.Errorf( + "expected counter in metric %s %s", name, metric, + ) + } + n, err = writeSample( + name, metric, "", "", + metric.Counter.GetValue(), + out, + ) + case dto.MetricType_GAUGE: + if metric.Gauge == nil { + return written, fmt.Errorf( + "expected gauge in metric %s %s", name, metric, + ) + } + n, err = writeSample( + name, metric, "", "", + metric.Gauge.GetValue(), + out, + ) + case dto.MetricType_UNTYPED: + if metric.Untyped == nil { + return written, fmt.Errorf( + "expected untyped in metric %s %s", name, metric, + ) + } + n, err = writeSample( + name, metric, "", "", + metric.Untyped.GetValue(), + out, + ) + case dto.MetricType_SUMMARY: + if metric.Summary == nil { + return written, fmt.Errorf( + "expected summary in metric %s %s", name, metric, + ) + } + for _, q := range metric.Summary.Quantile { + n, err = writeSample( + name, metric, + model.QuantileLabel, fmt.Sprint(q.GetQuantile()), + q.GetValue(), + out, + ) + written += n + if err != nil { + return written, err + } + } + n, err = writeSample( + name+"_sum", metric, "", "", + metric.Summary.GetSampleSum(), + out, + ) + if err != nil { + return written, err + } + written += n + n, err = writeSample( + name+"_count", metric, "", "", + float64(metric.Summary.GetSampleCount()), + out, + ) + case dto.MetricType_HISTOGRAM: + if metric.Histogram == nil { + return written, fmt.Errorf( + "expected histogram in metric %s %s", name, metric, + ) + } + infSeen := false + for _, q := range metric.Histogram.Bucket { + n, err = writeSample( + name+"_bucket", metric, + model.BucketLabel, fmt.Sprint(q.GetUpperBound()), + float64(q.GetCumulativeCount()), + out, + ) + written += n + if err != nil { + return written, err + } + if math.IsInf(q.GetUpperBound(), +1) { + infSeen = true + } + } + if !infSeen { + n, err = writeSample( + name+"_bucket", metric, + model.BucketLabel, "+Inf", + float64(metric.Histogram.GetSampleCount()), + out, + ) + if err != nil { + return written, err + } + written += n + } + n, err = writeSample( + name+"_sum", metric, "", "", + metric.Histogram.GetSampleSum(), + out, + ) + if err != nil { + return written, err + } + written += n + n, err = writeSample( + name+"_count", metric, "", "", + float64(metric.Histogram.GetSampleCount()), + out, + ) + default: + return written, fmt.Errorf( + "unexpected type in metric %s %s", name, metric, + ) + } + written += n + if err != nil { + return written, err + } + } + return written, nil +} + +// writeSample writes a single sample in text format to out, given the metric +// name, the metric proto message itself, optionally an additional label name +// and value (use empty strings if not required), and the value. The function +// returns the number of bytes written and any error encountered. 
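+//
+// For instance (all names made up), a metric carrying the label
+// method="GET", written with name "rpc_latency_bucket", additional label
+// pair le="0.5", and value 42, yields the line:
+//
+//	rpc_latency_bucket{method="GET",le="0.5"} 42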
+func writeSample( + name string, + metric *dto.Metric, + additionalLabelName, additionalLabelValue string, + value float64, + out io.Writer, +) (int, error) { + var written int + n, err := fmt.Fprint(out, name) + written += n + if err != nil { + return written, err + } + n, err = labelPairsToText( + metric.Label, + additionalLabelName, additionalLabelValue, + out, + ) + written += n + if err != nil { + return written, err + } + n, err = fmt.Fprintf(out, " %v", value) + written += n + if err != nil { + return written, err + } + if metric.TimestampMs != nil { + n, err = fmt.Fprintf(out, " %v", *metric.TimestampMs) + written += n + if err != nil { + return written, err + } + } + n, err = out.Write([]byte{'\n'}) + written += n + if err != nil { + return written, err + } + return written, nil +} + +// labelPairsToText converts a slice of LabelPair proto messages plus the +// explicitly given additional label pair into text formatted as required by the +// text format and writes it to 'out'. An empty slice in combination with an +// empty string 'additionalLabelName' results in nothing being +// written. Otherwise, the label pairs are written, escaped as required by the +// text format, and enclosed in '{...}'. The function returns the number of +// bytes written and any error encountered. +func labelPairsToText( + in []*dto.LabelPair, + additionalLabelName, additionalLabelValue string, + out io.Writer, +) (int, error) { + if len(in) == 0 && additionalLabelName == "" { + return 0, nil + } + var written int + separator := '{' + for _, lp := range in { + n, err := fmt.Fprintf( + out, `%c%s="%s"`, + separator, lp.GetName(), escapeString(lp.GetValue(), true), + ) + written += n + if err != nil { + return written, err + } + separator = ',' + } + if additionalLabelName != "" { + n, err := fmt.Fprintf( + out, `%c%s="%s"`, + separator, additionalLabelName, + escapeString(additionalLabelValue, true), + ) + written += n + if err != nil { + return written, err + } + } + n, err := out.Write([]byte{'}'}) + written += n + if err != nil { + return written, err + } + return written, nil +} + +var ( + escape = strings.NewReplacer("\\", `\\`, "\n", `\n`) + escapeWithDoubleQuote = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`) +) + +// escapeString replaces '\' by '\\', new line character by '\n', and - if +// includeDoubleQuote is true - '"' by '\"'. +func escapeString(v string, includeDoubleQuote bool) string { + if includeDoubleQuote { + return escapeWithDoubleQuote.Replace(v) + } + + return escape.Replace(v) +} diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go new file mode 100644 index 00000000..54bcfde2 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -0,0 +1,757 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package expfmt + +import ( + "bufio" + "bytes" + "fmt" + "io" + "math" + "strconv" + "strings" + + dto "github.com/prometheus/client_model/go" + + "github.com/golang/protobuf/proto" + "github.com/prometheus/common/model" +) + +// A stateFn is a function that represents a state in a state machine. By +// executing it, the state is progressed to the next state. The stateFn returns +// another stateFn, which represents the new state. The end state is represented +// by nil. +type stateFn func() stateFn + +// ParseError signals errors while parsing the simple and flat text-based +// exchange format. +type ParseError struct { + Line int + Msg string +} + +// Error implements the error interface. +func (e ParseError) Error() string { + return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg) +} + +// TextParser is used to parse the simple and flat text-based exchange format. Its +// zero value is ready to use. +type TextParser struct { + metricFamiliesByName map[string]*dto.MetricFamily + buf *bufio.Reader // Where the parsed input is read through. + err error // Most recent error. + lineCount int // Tracks the line count for error messages. + currentByte byte // The most recent byte read. + currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes. + currentMF *dto.MetricFamily + currentMetric *dto.Metric + currentLabelPair *dto.LabelPair + + // The remaining member variables are only used for summaries/histograms. + currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le' + // Summary specific. + summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature. + currentQuantile float64 + // Histogram specific. + histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature. + currentBucket float64 + // These tell us if the currently processed line ends on '_count' or + // '_sum' respectively and belong to a summary/histogram, representing the sample + // count and sum of that summary/histogram. + currentIsSummaryCount, currentIsSummarySum bool + currentIsHistogramCount, currentIsHistogramSum bool +} + +// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange +// format and creates MetricFamily proto messages. It returns the MetricFamily +// proto messages in a map where the metric names are the keys, along with any +// error encountered. +// +// If the input contains duplicate metrics (i.e. lines with the same metric name +// and exactly the same label set), the resulting MetricFamily will contain +// duplicate Metric proto messages. Similar is true for duplicate label +// names. Checks for duplicates have to be performed separately, if required. +// Also note that neither the metrics within each MetricFamily are sorted nor +// the label pairs within each Metric. Sorting is not required for the most +// frequent use of this method, which is sample ingestion in the Prometheus +// server. However, for presentation purposes, you might want to sort the +// metrics, and in some cases, you must sort the labels, e.g. for consumption by +// the metric family injection hook of the Prometheus registry. +// +// Summaries and histograms are rather special beasts. You would probably not +// use them in the simple text format anyway. This method can deal with +// summaries and histograms if they are presented in exactly the way the +// text.Create function creates them. +// +// This method must not be called concurrently. 
If you want to parse different +// input concurrently, instantiate a separate Parser for each goroutine. +func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) { + p.reset(in) + for nextState := p.startOfLine; nextState != nil; nextState = nextState() { + // Magic happens here... + } + // Get rid of empty metric families. + for k, mf := range p.metricFamiliesByName { + if len(mf.GetMetric()) == 0 { + delete(p.metricFamiliesByName, k) + } + } + // If p.err is io.EOF now, we have run into a premature end of the input + // stream. Turn this error into something nicer and more + // meaningful. (io.EOF is often used as a signal for the legitimate end + // of an input stream.) + if p.err == io.EOF { + p.parseError("unexpected end of input stream") + } + return p.metricFamiliesByName, p.err +} + +func (p *TextParser) reset(in io.Reader) { + p.metricFamiliesByName = map[string]*dto.MetricFamily{} + if p.buf == nil { + p.buf = bufio.NewReader(in) + } else { + p.buf.Reset(in) + } + p.err = nil + p.lineCount = 0 + if p.summaries == nil || len(p.summaries) > 0 { + p.summaries = map[uint64]*dto.Metric{} + } + if p.histograms == nil || len(p.histograms) > 0 { + p.histograms = map[uint64]*dto.Metric{} + } + p.currentQuantile = math.NaN() + p.currentBucket = math.NaN() +} + +// startOfLine represents the state where the next byte read from p.buf is the +// start of a line (or whitespace leading up to it). +func (p *TextParser) startOfLine() stateFn { + p.lineCount++ + if p.skipBlankTab(); p.err != nil { + // End of input reached. This is the only case where + // that is not an error but a signal that we are done. + p.err = nil + return nil + } + switch p.currentByte { + case '#': + return p.startComment + case '\n': + return p.startOfLine // Empty line, start the next one. + } + return p.readingMetricName +} + +// startComment represents the state where the next byte read from p.buf is the +// start of a comment (or whitespace leading up to it). +func (p *TextParser) startComment() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + return p.startOfLine + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. + } + // If we have hit the end of line already, there is nothing left + // to do. This is not considered a syntax error. + if p.currentByte == '\n' { + return p.startOfLine + } + keyword := p.currentToken.String() + if keyword != "HELP" && keyword != "TYPE" { + // Generic comment, ignore by fast forwarding to end of line. + for p.currentByte != '\n' { + if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { + return nil // Unexpected end of input. + } + } + return p.startOfLine + } + // There is something. Next has to be a metric name. + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.readTokenAsMetricName(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + // At the end of the line already. + // Again, this is not considered a syntax error. + return p.startOfLine + } + if !isBlankOrTab(p.currentByte) { + p.parseError("invalid metric name in comment") + return nil + } + p.setOrCreateCurrentMF() + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + // At the end of the line already. + // Again, this is not considered a syntax error. 
+ return p.startOfLine + } + switch keyword { + case "HELP": + return p.readingHelp + case "TYPE": + return p.readingType + } + panic(fmt.Sprintf("code error: unexpected keyword %q", keyword)) +} + +// readingMetricName represents the state where the last byte read (now in +// p.currentByte) is the first byte of a metric name. +func (p *TextParser) readingMetricName() stateFn { + if p.readTokenAsMetricName(); p.err != nil { + return nil + } + if p.currentToken.Len() == 0 { + p.parseError("invalid metric name") + return nil + } + p.setOrCreateCurrentMF() + // Now is the time to fix the type if it hasn't happened yet. + if p.currentMF.Type == nil { + p.currentMF.Type = dto.MetricType_UNTYPED.Enum() + } + p.currentMetric = &dto.Metric{} + // Do not append the newly created currentMetric to + // currentMF.Metric right now. First wait if this is a summary, + // and the metric exists already, which we can only know after + // having read all the labels. + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingLabels +} + +// readingLabels represents the state where the last byte read (now in +// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the +// first byte of the value (otherwise). +func (p *TextParser) readingLabels() stateFn { + // Summaries/histograms are special. We have to reset the + // currentLabels map, currentQuantile and currentBucket before starting to + // read labels. + if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + p.currentLabels = map[string]string{} + p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName() + p.currentQuantile = math.NaN() + p.currentBucket = math.NaN() + } + if p.currentByte != '{' { + return p.readingValue + } + return p.startLabelName +} + +// startLabelName represents the state where the next byte read from p.buf is +// the start of a label name (or whitespace leading up to it). +func (p *TextParser) startLabelName() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '}' { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingValue + } + if p.readTokenAsLabelName(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentToken.Len() == 0 { + p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) + return nil + } + p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} + if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { + p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) + return nil + } + // Special summary/histogram treatment. Don't add 'quantile' and 'le' + // labels to 'real' labels. + if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && + !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair) + } + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. 
+ } + if p.currentByte != '=' { + p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) + return nil + } + return p.startLabelValue +} + +// startLabelValue represents the state where the next byte read from p.buf is +// the start of a (quoted) label value (or whitespace leading up to it). +func (p *TextParser) startLabelValue() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte != '"' { + p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte)) + return nil + } + if p.readTokenAsLabelValue(); p.err != nil { + return nil + } + if !model.LabelValue(p.currentToken.String()).IsValid() { + p.parseError(fmt.Sprintf("invalid label value %q", p.currentToken.String())) + return nil + } + p.currentLabelPair.Value = proto.String(p.currentToken.String()) + // Special treatment of summaries: + // - Quantile labels are special, will result in dto.Quantile later. + // - Other labels have to be added to currentLabels for signature calculation. + if p.currentMF.GetType() == dto.MetricType_SUMMARY { + if p.currentLabelPair.GetName() == model.QuantileLabel { + if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) + return nil + } + } else { + p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() + } + } + // Similar special treatment of histograms. + if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + if p.currentLabelPair.GetName() == model.BucketLabel { + if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue())) + return nil + } + } else { + p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() + } + } + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + switch p.currentByte { + case ',': + return p.startLabelName + + case '}': + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingValue + default: + p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.Value)) + return nil + } +} + +// readingValue represents the state where the last byte read (now in +// p.currentByte) is the first byte of the sample value (i.e. a float). +func (p *TextParser) readingValue() stateFn { + // When we are here, we have read all the labels, so for the + // special case of a summary/histogram, we can finally find out + // if the metric already exists. 
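+	// Every quantile (or bucket) line of a summary (or histogram)
+	// contributes to one shared dto.Metric, so the lookup below keys on
+	// the signature of the ordinary labels (everything except
+	// 'quantile'/'le') rather than creating a new Metric per line.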
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY { + signature := model.LabelsToSignature(p.currentLabels) + if summary := p.summaries[signature]; summary != nil { + p.currentMetric = summary + } else { + p.summaries[signature] = p.currentMetric + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + signature := model.LabelsToSignature(p.currentLabels) + if histogram := p.histograms[signature]; histogram != nil { + p.currentMetric = histogram + } else { + p.histograms[signature] = p.currentMetric + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + } else { + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. + } + value, err := strconv.ParseFloat(p.currentToken.String(), 64) + if err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String())) + return nil + } + switch p.currentMF.GetType() { + case dto.MetricType_COUNTER: + p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)} + case dto.MetricType_GAUGE: + p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)} + case dto.MetricType_UNTYPED: + p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)} + case dto.MetricType_SUMMARY: + // *sigh* + if p.currentMetric.Summary == nil { + p.currentMetric.Summary = &dto.Summary{} + } + switch { + case p.currentIsSummaryCount: + p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value)) + case p.currentIsSummarySum: + p.currentMetric.Summary.SampleSum = proto.Float64(value) + case !math.IsNaN(p.currentQuantile): + p.currentMetric.Summary.Quantile = append( + p.currentMetric.Summary.Quantile, + &dto.Quantile{ + Quantile: proto.Float64(p.currentQuantile), + Value: proto.Float64(value), + }, + ) + } + case dto.MetricType_HISTOGRAM: + // *sigh* + if p.currentMetric.Histogram == nil { + p.currentMetric.Histogram = &dto.Histogram{} + } + switch { + case p.currentIsHistogramCount: + p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value)) + case p.currentIsHistogramSum: + p.currentMetric.Histogram.SampleSum = proto.Float64(value) + case !math.IsNaN(p.currentBucket): + p.currentMetric.Histogram.Bucket = append( + p.currentMetric.Histogram.Bucket, + &dto.Bucket{ + UpperBound: proto.Float64(p.currentBucket), + CumulativeCount: proto.Uint64(uint64(value)), + }, + ) + } + default: + p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName()) + } + if p.currentByte == '\n' { + return p.startOfLine + } + return p.startTimestamp +} + +// startTimestamp represents the state where the next byte read from p.buf is +// the start of the timestamp (or whitespace leading up to it). +func (p *TextParser) startTimestamp() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. + } + timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64) + if err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String())) + return nil + } + p.currentMetric.TimestampMs = proto.Int64(timestamp) + if p.readTokenUntilNewline(false); p.err != nil { + return nil // Unexpected end of input. 
+	}
+	if p.currentToken.Len() > 0 {
+		p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String()))
+		return nil
+	}
+	return p.startOfLine
+}
+
+// readingHelp represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the docstring after 'HELP'.
+func (p *TextParser) readingHelp() stateFn {
+	if p.currentMF.Help != nil {
+		p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName()))
+		return nil
+	}
+	// Rest of line is the docstring.
+	if p.readTokenUntilNewline(true); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	p.currentMF.Help = proto.String(p.currentToken.String())
+	return p.startOfLine
+}
+
+// readingType represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the type hint after 'TYPE'.
+func (p *TextParser) readingType() stateFn {
+	if p.currentMF.Type != nil {
+		p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName()))
+		return nil
+	}
+	// Rest of line is the type.
+	if p.readTokenUntilNewline(false); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())]
+	if !ok {
+		p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String()))
+		return nil
+	}
+	p.currentMF.Type = dto.MetricType(metricType).Enum()
+	return p.startOfLine
+}
+
+// parseError sets p.err to a ParseError at the current line with the given
+// message.
+func (p *TextParser) parseError(msg string) {
+	p.err = ParseError{
+		Line: p.lineCount,
+		Msg:  msg,
+	}
+}
+
+// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte
+// that is neither ' ' nor '\t'. That byte is left in p.currentByte.
+func (p *TextParser) skipBlankTab() {
+	for {
+		if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) {
+			return
+		}
+	}
+}
+
+// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do
+// anything if p.currentByte is neither ' ' nor '\t'.
+func (p *TextParser) skipBlankTabIfCurrentBlankTab() {
+	if isBlankOrTab(p.currentByte) {
+		p.skipBlankTab()
+	}
+}
+
+// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The
+// first byte considered is the byte already read (now in p.currentByte). The
+// first whitespace byte encountered is still copied into p.currentByte, but not
+// into p.currentToken.
+func (p *TextParser) readTokenUntilWhitespace() {
+	p.currentToken.Reset()
+	for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' {
+		p.currentToken.WriteByte(p.currentByte)
+		p.currentByte, p.err = p.buf.ReadByte()
+	}
+}
+
+// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first
+// byte considered is the byte already read (now in p.currentByte). The first
+// newline byte encountered is still copied into p.currentByte, but not into
+// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
+// recognized: '\\' translates into '\', and '\n' into a line-feed character. All
+// other escape sequences are invalid and cause an error.
+func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { + p.currentToken.Reset() + escaped := false + for p.err == nil { + if recognizeEscapeSequence && escaped { + switch p.currentByte { + case '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + } else { + switch p.currentByte { + case '\n': + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } + p.currentByte, p.err = p.buf.ReadByte() + } +} + +// readTokenAsMetricName copies a metric name from p.buf into p.currentToken. +// The first byte considered is the byte already read (now in p.currentByte). +// The first byte not part of a metric name is still copied into p.currentByte, +// but not into p.currentToken. +func (p *TextParser) readTokenAsMetricName() { + p.currentToken.Reset() + if !isValidMetricNameStart(p.currentByte) { + return + } + for { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + if p.err != nil || !isValidMetricNameContinuation(p.currentByte) { + return + } + } +} + +// readTokenAsLabelName copies a label name from p.buf into p.currentToken. +// The first byte considered is the byte already read (now in p.currentByte). +// The first byte not part of a label name is still copied into p.currentByte, +// but not into p.currentToken. +func (p *TextParser) readTokenAsLabelName() { + p.currentToken.Reset() + if !isValidLabelNameStart(p.currentByte) { + return + } + for { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + if p.err != nil || !isValidLabelNameContinuation(p.currentByte) { + return + } + } +} + +// readTokenAsLabelValue copies a label value from p.buf into p.currentToken. +// In contrast to the other 'readTokenAs...' functions, which start with the +// last read byte in p.currentByte, this method ignores p.currentByte and starts +// with reading a new byte from p.buf. The first byte not part of a label value +// is still copied into p.currentByte, but not into p.currentToken. +func (p *TextParser) readTokenAsLabelValue() { + p.currentToken.Reset() + escaped := false + for { + if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { + return + } + if escaped { + switch p.currentByte { + case '"', '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + continue + } + switch p.currentByte { + case '"': + return + case '\n': + p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String())) + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } +} + +func (p *TextParser) setOrCreateCurrentMF() { + p.currentIsSummaryCount = false + p.currentIsSummarySum = false + p.currentIsHistogramCount = false + p.currentIsHistogramSum = false + name := p.currentToken.String() + if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil { + return + } + // Try out if this is a _sum or _count for a summary/histogram. 
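+	// E.g. a (made-up) "rpc_duration_seconds_sum" line is attributed to
+	// the "rpc_duration_seconds" family if that family exists and is a
+	// summary.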
+ summaryName := summaryMetricName(name) + if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil { + if p.currentMF.GetType() == dto.MetricType_SUMMARY { + if isCount(name) { + p.currentIsSummaryCount = true + } + if isSum(name) { + p.currentIsSummarySum = true + } + return + } + } + histogramName := histogramMetricName(name) + if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil { + if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + if isCount(name) { + p.currentIsHistogramCount = true + } + if isSum(name) { + p.currentIsHistogramSum = true + } + return + } + } + p.currentMF = &dto.MetricFamily{Name: proto.String(name)} + p.metricFamiliesByName[name] = p.currentMF +} + +func isValidLabelNameStart(b byte) bool { + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' +} + +func isValidLabelNameContinuation(b byte) bool { + return isValidLabelNameStart(b) || (b >= '0' && b <= '9') +} + +func isValidMetricNameStart(b byte) bool { + return isValidLabelNameStart(b) || b == ':' +} + +func isValidMetricNameContinuation(b byte) bool { + return isValidLabelNameContinuation(b) || b == ':' +} + +func isBlankOrTab(b byte) bool { + return b == ' ' || b == '\t' +} + +func isCount(name string) bool { + return len(name) > 6 && name[len(name)-6:] == "_count" +} + +func isSum(name string) bool { + return len(name) > 4 && name[len(name)-4:] == "_sum" +} + +func isBucket(name string) bool { + return len(name) > 7 && name[len(name)-7:] == "_bucket" +} + +func summaryMetricName(name string) string { + switch { + case isCount(name): + return name[:len(name)-6] + case isSum(name): + return name[:len(name)-4] + default: + return name + } +} + +func histogramMetricName(name string) string { + switch { + case isCount(name): + return name[:len(name)-6] + case isSum(name): + return name[:len(name)-4] + case isBucket(name): + return name[:len(name)-7] + default: + return name + } +} diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt new file mode 100644 index 00000000..7723656d --- /dev/null +++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt @@ -0,0 +1,67 @@ +PACKAGE + +package goautoneg +import "bitbucket.org/ww/goautoneg" + +HTTP Content-Type Autonegotiation. + +The functions in this package implement the behaviour specified in +http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html + +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +FUNCTIONS + +func Negotiate(header string, alternatives []string) (content_type string) +Negotiate the most appropriate content_type given the accept header +and a list of alternatives. + +func ParseAccept(header string) (accept []Accept) +Parse an Accept Header string returning a sorted list +of clauses + + +TYPES + +type Accept struct { + Type, SubType string + Q float32 + Params map[string]string +} +Structure to represent a clause in an HTTP Accept Header + + +SUBDIRECTORIES + + .hg diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go new file mode 100644 index 00000000..648b38cb --- /dev/null +++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go @@ -0,0 +1,162 @@ +/* +HTTP Content-Type Autonegotiation. + +The functions in this package implement the behaviour specified in +http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html + +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + +*/ +package goautoneg + +import ( + "sort" + "strconv" + "strings" +) + +// Structure to represent a clause in an HTTP Accept Header +type Accept struct { + Type, SubType string + Q float64 + Params map[string]string +} + +// For internal use, so that we can use the sort interface +type accept_slice []Accept + +func (accept accept_slice) Len() int { + slice := []Accept(accept) + return len(slice) +} + +func (accept accept_slice) Less(i, j int) bool { + slice := []Accept(accept) + ai, aj := slice[i], slice[j] + if ai.Q > aj.Q { + return true + } + if ai.Type != "*" && aj.Type == "*" { + return true + } + if ai.SubType != "*" && aj.SubType == "*" { + return true + } + return false +} + +func (accept accept_slice) Swap(i, j int) { + slice := []Accept(accept) + slice[i], slice[j] = slice[j], slice[i] +} + +// Parse an Accept Header string returning a sorted list +// of clauses +func ParseAccept(header string) (accept []Accept) { + parts := strings.Split(header, ",") + accept = make([]Accept, 0, len(parts)) + for _, part := range parts { + part := strings.Trim(part, " ") + + a := Accept{} + a.Params = make(map[string]string) + a.Q = 1.0 + + mrp := strings.Split(part, ";") + + media_range := mrp[0] + sp := strings.Split(media_range, "/") + a.Type = strings.Trim(sp[0], " ") + + switch { + case len(sp) == 1 && a.Type == "*": + a.SubType = "*" + case len(sp) == 2: + a.SubType = strings.Trim(sp[1], " ") + default: + continue + } + + if len(mrp) == 1 { + accept = append(accept, a) + continue + } + + for _, param := range mrp[1:] { + sp := strings.SplitN(param, "=", 2) + if len(sp) != 2 { + continue + } + token := strings.Trim(sp[0], " ") + if token == "q" { + a.Q, _ = strconv.ParseFloat(sp[1], 32) + } else { + a.Params[token] = strings.Trim(sp[1], " ") + } + } + + accept = append(accept, a) + } + + slice := accept_slice(accept) + sort.Sort(slice) + + return +} + +// Negotiate the most appropriate content_type given the accept header +// and a list of alternatives. +func Negotiate(header string, alternatives []string) (content_type string) { + asp := make([][]string, 0, len(alternatives)) + for _, ctype := range alternatives { + asp = append(asp, strings.SplitN(ctype, "/", 2)) + } + for _, clause := range ParseAccept(header) { + for i, ctsp := range asp { + if clause.Type == ctsp[0] && clause.SubType == ctsp[1] { + content_type = alternatives[i] + return + } + if clause.Type == ctsp[0] && clause.SubType == "*" { + content_type = alternatives[i] + return + } + if clause.Type == "*" && clause.SubType == "*" { + content_type = alternatives[i] + return + } + } + } + return +} diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go new file mode 100644 index 00000000..35e739c7 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/alert.go @@ -0,0 +1,136 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package model
+
+import (
+	"fmt"
+	"time"
+)
+
+type AlertStatus string
+
+const (
+	AlertFiring   AlertStatus = "firing"
+	AlertResolved AlertStatus = "resolved"
+)
+
+// Alert is a generic representation of an alert in the Prometheus eco-system.
+type Alert struct {
+	// Label value pairs for purpose of aggregation, matching, and disposition
+	// dispatching. This must minimally include an "alertname" label.
+	Labels LabelSet `json:"labels"`
+
+	// Extra key/value information which does not define alert identity.
+	Annotations LabelSet `json:"annotations"`
+
+	// The known time range for this alert. Both ends are optional.
+	StartsAt     time.Time `json:"startsAt,omitempty"`
+	EndsAt       time.Time `json:"endsAt,omitempty"`
+	GeneratorURL string    `json:"generatorURL"`
+}
+
+// Name returns the name of the alert. It is equivalent to the "alertname" label.
+func (a *Alert) Name() string {
+	return string(a.Labels[AlertNameLabel])
+}
+
+// Fingerprint returns a unique hash for the alert. It is equivalent to
+// the fingerprint of the alert's label set.
+func (a *Alert) Fingerprint() Fingerprint {
+	return a.Labels.Fingerprint()
+}
+
+func (a *Alert) String() string {
+	s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7])
+	if a.Resolved() {
+		return s + "[resolved]"
+	}
+	return s + "[active]"
+}
+
+// Resolved returns true iff the activity interval ended in the past.
+func (a *Alert) Resolved() bool {
+	return a.ResolvedAt(time.Now())
+}
+
+// ResolvedAt returns true iff the activity interval ended before
+// the given timestamp.
+func (a *Alert) ResolvedAt(ts time.Time) bool {
+	if a.EndsAt.IsZero() {
+		return false
+	}
+	return !a.EndsAt.After(ts)
+}
+
+// Status returns the status of the alert.
+func (a *Alert) Status() AlertStatus {
+	if a.Resolved() {
+		return AlertResolved
+	}
+	return AlertFiring
+}
+
+// Validate checks whether the alert data is consistent.
+func (a *Alert) Validate() error {
+	if a.StartsAt.IsZero() {
+		return fmt.Errorf("start time missing")
+	}
+	if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {
+		return fmt.Errorf("start time must be before end time")
+	}
+	if err := a.Labels.Validate(); err != nil {
+		return fmt.Errorf("invalid label set: %s", err)
+	}
+	if len(a.Labels) == 0 {
+		return fmt.Errorf("at least one label pair required")
+	}
+	if err := a.Annotations.Validate(); err != nil {
+		return fmt.Errorf("invalid annotations: %s", err)
+	}
+	return nil
+}
+
+// Alerts is a list of alerts that can be sorted in chronological order.
+type Alerts []*Alert
+
+func (as Alerts) Len() int      { return len(as) }
+func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] }
+
+func (as Alerts) Less(i, j int) bool {
+	if as[i].StartsAt.Before(as[j].StartsAt) {
+		return true
+	}
+	if as[i].EndsAt.Before(as[j].EndsAt) {
+		return true
+	}
+	return as[i].Fingerprint() < as[j].Fingerprint()
+}
+
+// HasFiring returns true iff one of the alerts is not resolved.
+func (as Alerts) HasFiring() bool {
+	for _, a := range as {
+		if !a.Resolved() {
+			return true
+		}
+	}
+	return false
+}
+
+// Status returns AlertFiring iff at least one of the alerts is firing.
+func (as Alerts) Status() AlertStatus { + if as.HasFiring() { + return AlertFiring + } + return AlertResolved +} diff --git a/vendor/github.com/prometheus/common/model/fingerprinting.go b/vendor/github.com/prometheus/common/model/fingerprinting.go new file mode 100644 index 00000000..fc4de410 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/fingerprinting.go @@ -0,0 +1,105 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "strconv" +) + +// Fingerprint provides a hash-capable representation of a Metric. +// For our purposes, FNV-1A 64-bit is used. +type Fingerprint uint64 + +// FingerprintFromString transforms a string representation into a Fingerprint. +func FingerprintFromString(s string) (Fingerprint, error) { + num, err := strconv.ParseUint(s, 16, 64) + return Fingerprint(num), err +} + +// ParseFingerprint parses the input string into a fingerprint. +func ParseFingerprint(s string) (Fingerprint, error) { + num, err := strconv.ParseUint(s, 16, 64) + if err != nil { + return 0, err + } + return Fingerprint(num), nil +} + +func (f Fingerprint) String() string { + return fmt.Sprintf("%016x", uint64(f)) +} + +// Fingerprints represents a collection of Fingerprint subject to a given +// natural sorting scheme. It implements sort.Interface. +type Fingerprints []Fingerprint + +// Len implements sort.Interface. +func (f Fingerprints) Len() int { + return len(f) +} + +// Less implements sort.Interface. +func (f Fingerprints) Less(i, j int) bool { + return f[i] < f[j] +} + +// Swap implements sort.Interface. +func (f Fingerprints) Swap(i, j int) { + f[i], f[j] = f[j], f[i] +} + +// FingerprintSet is a set of Fingerprints. +type FingerprintSet map[Fingerprint]struct{} + +// Equal returns true if both sets contain the same elements (and not more). +func (s FingerprintSet) Equal(o FingerprintSet) bool { + if len(s) != len(o) { + return false + } + + for k := range s { + if _, ok := o[k]; !ok { + return false + } + } + + return true +} + +// Intersection returns the elements contained in both sets. +func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet { + myLength, otherLength := len(s), len(o) + if myLength == 0 || otherLength == 0 { + return FingerprintSet{} + } + + subSet := s + superSet := o + + if otherLength < myLength { + subSet = o + superSet = s + } + + out := FingerprintSet{} + + for k := range subSet { + if _, ok := superSet[k]; ok { + out[k] = struct{}{} + } + } + + return out +} diff --git a/vendor/github.com/prometheus/common/model/fnv.go b/vendor/github.com/prometheus/common/model/fnv.go new file mode 100644 index 00000000..038fc1c9 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/fnv.go @@ -0,0 +1,42 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+// Inline and byte-free variant of hash/fnv's fnv64a.
+
+const (
+	offset64 = 14695981039346656037
+	prime64  = 1099511628211
+)
+
+// hashNew initializes a new fnv64a hash value.
+func hashNew() uint64 {
+	return offset64
+}
+
+// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
+func hashAdd(h uint64, s string) uint64 {
+	for i := 0; i < len(s); i++ {
+		h ^= uint64(s[i])
+		h *= prime64
+	}
+	return h
+}
+
+// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
+func hashAddByte(h uint64, b byte) uint64 {
+	h ^= uint64(b)
+	h *= prime64
+	return h
+}
diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go
new file mode 100644
index 00000000..41051a01
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labels.go
@@ -0,0 +1,210 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"encoding/json"
+	"fmt"
+	"regexp"
+	"strings"
+	"unicode/utf8"
+)
+
+const (
+	// AlertNameLabel is the name of the label containing an alert's name.
+	AlertNameLabel = "alertname"
+
+	// ExportedLabelPrefix is the prefix to prepend to the label names present in
+	// exported metrics if a label of the same name is added by the server.
+	ExportedLabelPrefix = "exported_"
+
+	// MetricNameLabel is the label name indicating the metric name of a
+	// timeseries.
+	MetricNameLabel = "__name__"
+
+	// SchemeLabel is the name of the label that holds the scheme on which to
+	// scrape a target.
+	SchemeLabel = "__scheme__"
+
+	// AddressLabel is the name of the label that holds the address of
+	// a scrape target.
+	AddressLabel = "__address__"
+
+	// MetricsPathLabel is the name of the label that holds the path on which to
+	// scrape a target.
+	MetricsPathLabel = "__metrics_path__"
+
+	// ReservedLabelPrefix is a prefix which is not legal in user-supplied
+	// label names.
+	ReservedLabelPrefix = "__"
+
+	// MetaLabelPrefix is a prefix for labels that provide meta information.
+	// Labels with this prefix are used for intermediate label processing and
+	// will not be attached to time series.
+	MetaLabelPrefix = "__meta_"
+
+	// TmpLabelPrefix is a prefix for temporary labels as part of relabelling.
+	// Labels with this prefix are used for intermediate label processing and
+	// will not be attached to time series. This is reserved for use in
+	// Prometheus configuration files by users.
+	TmpLabelPrefix = "__tmp_"
+
+	// ParamLabelPrefix is a prefix for labels that provide URL parameters
+	// used to scrape a target.
+	ParamLabelPrefix = "__param_"
+
+	// JobLabel is the label name indicating the job from which a timeseries
+	// was scraped.
+	JobLabel = "job"
+
+	// InstanceLabel is the label name used for the instance label.
+	InstanceLabel = "instance"
+
+	// BucketLabel is used for the label that defines the upper bound of a
+	// bucket of a histogram ("le" -> "less or equal").
+	BucketLabel = "le"
+
+	// QuantileLabel is used for the label that defines the quantile in a
+	// summary.
+	QuantileLabel = "quantile"
+)
+
+// LabelNameRE is a regular expression matching valid label names. Note that the
+// IsValid method of LabelName performs the same check but faster than a match
+// with this regular expression.
+var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
+
+// A LabelName is a key for a LabelSet or Metric. It has a value associated
+// therewith.
+type LabelName string
+
+// IsValid is true iff the label name matches the pattern of LabelNameRE. This
+// method, however, does not use LabelNameRE for the check but a much faster
+// hardcoded implementation.
+func (ln LabelName) IsValid() bool {
+	if len(ln) == 0 {
+		return false
+	}
+	for i, b := range ln {
+		if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
+			return false
+		}
+	}
+	return true
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var s string
+	if err := unmarshal(&s); err != nil {
+		return err
+	}
+	if !LabelName(s).IsValid() {
+		return fmt.Errorf("%q is not a valid label name", s)
+	}
+	*ln = LabelName(s)
+	return nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (ln *LabelName) UnmarshalJSON(b []byte) error {
+	var s string
+	if err := json.Unmarshal(b, &s); err != nil {
+		return err
+	}
+	if !LabelName(s).IsValid() {
+		return fmt.Errorf("%q is not a valid label name", s)
+	}
+	*ln = LabelName(s)
+	return nil
+}
+
+// LabelNames is a sortable LabelName slice. It implements sort.Interface.
+type LabelNames []LabelName
+
+func (l LabelNames) Len() int {
+	return len(l)
+}
+
+func (l LabelNames) Less(i, j int) bool {
+	return l[i] < l[j]
+}
+
+func (l LabelNames) Swap(i, j int) {
+	l[i], l[j] = l[j], l[i]
+}
+
+func (l LabelNames) String() string {
+	labelStrings := make([]string, 0, len(l))
+	for _, label := range l {
+		labelStrings = append(labelStrings, string(label))
+	}
+	return strings.Join(labelStrings, ", ")
+}
+
+// A LabelValue is an associated value for a LabelName.
+type LabelValue string
+
+// IsValid returns true iff the string is valid UTF-8.
+func (lv LabelValue) IsValid() bool {
+	return utf8.ValidString(string(lv))
+}
+
+// LabelValues is a sortable LabelValue slice. It implements sort.Interface.
+type LabelValues []LabelValue
+
+func (l LabelValues) Len() int {
+	return len(l)
+}
+
+func (l LabelValues) Less(i, j int) bool {
+	return string(l[i]) < string(l[j])
+}
+
+func (l LabelValues) Swap(i, j int) {
+	l[i], l[j] = l[j], l[i]
+}
+
+// LabelPair pairs a name with a value.
+type LabelPair struct {
+	Name  LabelName
+	Value LabelValue
+}
+
+// LabelPairs is a sortable slice of LabelPair pointers. It implements
+// sort.Interface.
+type LabelPairs []*LabelPair + +func (l LabelPairs) Len() int { + return len(l) +} + +func (l LabelPairs) Less(i, j int) bool { + switch { + case l[i].Name > l[j].Name: + return false + case l[i].Name < l[j].Name: + return true + case l[i].Value > l[j].Value: + return false + case l[i].Value < l[j].Value: + return true + default: + return false + } +} + +func (l LabelPairs) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go new file mode 100644 index 00000000..6eda08a7 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/labelset.go @@ -0,0 +1,169 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "sort" + "strings" +) + +// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet +// may be fully-qualified down to the point where it may resolve to a single +// Metric in the data store or not. All operations that occur within the realm +// of a LabelSet can emit a vector of Metric entities to which the LabelSet may +// match. +type LabelSet map[LabelName]LabelValue + +// Validate checks whether all names and values in the label set +// are valid. +func (ls LabelSet) Validate() error { + for ln, lv := range ls { + if !ln.IsValid() { + return fmt.Errorf("invalid name %q", ln) + } + if !lv.IsValid() { + return fmt.Errorf("invalid value %q", lv) + } + } + return nil +} + +// Equal returns true iff both label sets have exactly the same key/value pairs. +func (ls LabelSet) Equal(o LabelSet) bool { + if len(ls) != len(o) { + return false + } + for ln, lv := range ls { + olv, ok := o[ln] + if !ok { + return false + } + if olv != lv { + return false + } + } + return true +} + +// Before compares the metrics, using the following criteria: +// +// If m has fewer labels than o, it is before o. If it has more, it is not. +// +// If the number of labels is the same, the superset of all label names is +// sorted alphanumerically. The first differing label pair found in that order +// determines the outcome: If the label does not exist at all in m, then m is +// before o, and vice versa. Otherwise the label value is compared +// alphanumerically. +// +// If m and o are equal, the method returns false. +func (ls LabelSet) Before(o LabelSet) bool { + if len(ls) < len(o) { + return true + } + if len(ls) > len(o) { + return false + } + + lns := make(LabelNames, 0, len(ls)+len(o)) + for ln := range ls { + lns = append(lns, ln) + } + for ln := range o { + lns = append(lns, ln) + } + // It's probably not worth it to de-dup lns. + sort.Sort(lns) + for _, ln := range lns { + mlv, ok := ls[ln] + if !ok { + return true + } + olv, ok := o[ln] + if !ok { + return false + } + if mlv < olv { + return true + } + if mlv > olv { + return false + } + } + return false +} + +// Clone returns a copy of the label set. 
+func (ls LabelSet) Clone() LabelSet { + lsn := make(LabelSet, len(ls)) + for ln, lv := range ls { + lsn[ln] = lv + } + return lsn +} + +// Merge is a helper function to non-destructively merge two label sets. +func (l LabelSet) Merge(other LabelSet) LabelSet { + result := make(LabelSet, len(l)) + + for k, v := range l { + result[k] = v + } + + for k, v := range other { + result[k] = v + } + + return result +} + +func (l LabelSet) String() string { + lstrs := make([]string, 0, len(l)) + for l, v := range l { + lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v)) + } + + sort.Strings(lstrs) + return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) +} + +// Fingerprint returns the LabelSet's fingerprint. +func (ls LabelSet) Fingerprint() Fingerprint { + return labelSetToFingerprint(ls) +} + +// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing +// algorithm, which is, however, more susceptible to hash collisions. +func (ls LabelSet) FastFingerprint() Fingerprint { + return labelSetToFastFingerprint(ls) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (l *LabelSet) UnmarshalJSON(b []byte) error { + var m map[LabelName]LabelValue + if err := json.Unmarshal(b, &m); err != nil { + return err + } + // encoding/json only unmarshals maps of the form map[string]T. It treats + // LabelName as a string and does not call its UnmarshalJSON method. + // Thus, we have to replicate the behavior here. + for ln := range m { + if !ln.IsValid() { + return fmt.Errorf("%q is not a valid label name", ln) + } + } + *l = LabelSet(m) + return nil +} diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go new file mode 100644 index 00000000..f7250909 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -0,0 +1,103 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "regexp" + "sort" + "strings" +) + +var ( + separator = []byte{0} + // MetricNameRE is a regular expression matching valid metric + // names. Note that the IsValidMetricName function performs the same + // check but faster than a match with this regular expression. + MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`) +) + +// A Metric is similar to a LabelSet, but the key difference is that a Metric is +// a singleton and refers to one and only one stream of samples. +type Metric LabelSet + +// Equal compares the metrics. +func (m Metric) Equal(o Metric) bool { + return LabelSet(m).Equal(LabelSet(o)) +} + +// Before compares the metrics' underlying label sets. +func (m Metric) Before(o Metric) bool { + return LabelSet(m).Before(LabelSet(o)) +} + +// Clone returns a copy of the Metric. 
+func (m Metric) Clone() Metric { + clone := make(Metric, len(m)) + for k, v := range m { + clone[k] = v + } + return clone +} + +func (m Metric) String() string { + metricName, hasName := m[MetricNameLabel] + numLabels := len(m) - 1 + if !hasName { + numLabels = len(m) + } + labelStrings := make([]string, 0, numLabels) + for label, value := range m { + if label != MetricNameLabel { + labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value)) + } + } + + switch numLabels { + case 0: + if hasName { + return string(metricName) + } + return "{}" + default: + sort.Strings(labelStrings) + return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", ")) + } +} + +// Fingerprint returns a Metric's Fingerprint. +func (m Metric) Fingerprint() Fingerprint { + return LabelSet(m).Fingerprint() +} + +// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing +// algorithm, which is, however, more susceptible to hash collisions. +func (m Metric) FastFingerprint() Fingerprint { + return LabelSet(m).FastFingerprint() +} + +// IsValidMetricName returns true iff name matches the pattern of MetricNameRE. +// This function, however, does not use MetricNameRE for the check but a much +// faster hardcoded implementation. +func IsValidMetricName(n LabelValue) bool { + if len(n) == 0 { + return false + } + for i, b := range n { + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) { + return false + } + } + return true +} diff --git a/vendor/github.com/prometheus/common/model/model.go b/vendor/github.com/prometheus/common/model/model.go new file mode 100644 index 00000000..a7b96917 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/model.go @@ -0,0 +1,16 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package model contains common data structures that are shared across +// Prometheus components and libraries. +package model diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go new file mode 100644 index 00000000..8762b13c --- /dev/null +++ b/vendor/github.com/prometheus/common/model/signature.go @@ -0,0 +1,144 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package model
+
+import (
+	"sort"
+)
+
+// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is
+// used to separate label names, label values, and other strings from each other
+// when calculating their combined hash value (aka signature aka fingerprint).
+const SeparatorByte byte = 255
+
+var (
+	// emptyLabelSignature caches the signature of an empty label set.
+	emptyLabelSignature = hashNew()
+)
+
+// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
+// given label set. (Collisions are possible but unlikely if the number of label
+// sets the function is applied to is small.)
+func LabelsToSignature(labels map[string]string) uint64 {
+	if len(labels) == 0 {
+		return emptyLabelSignature
+	}
+
+	labelNames := make([]string, 0, len(labels))
+	for labelName := range labels {
+		labelNames = append(labelNames, labelName)
+	}
+	sort.Strings(labelNames)
+
+	sum := hashNew()
+	for _, labelName := range labelNames {
+		sum = hashAdd(sum, labelName)
+		sum = hashAddByte(sum, SeparatorByte)
+		sum = hashAdd(sum, labels[labelName])
+		sum = hashAddByte(sum, SeparatorByte)
+	}
+	return sum
+}
+
+// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as
+// parameter (rather than a label map) and returns a Fingerprint.
+func labelSetToFingerprint(ls LabelSet) Fingerprint {
+	if len(ls) == 0 {
+		return Fingerprint(emptyLabelSignature)
+	}
+
+	labelNames := make(LabelNames, 0, len(ls))
+	for labelName := range ls {
+		labelNames = append(labelNames, labelName)
+	}
+	sort.Sort(labelNames)
+
+	sum := hashNew()
+	for _, labelName := range labelNames {
+		sum = hashAdd(sum, string(labelName))
+		sum = hashAddByte(sum, SeparatorByte)
+		sum = hashAdd(sum, string(ls[labelName]))
+		sum = hashAddByte(sum, SeparatorByte)
+	}
+	return Fingerprint(sum)
+}
+
+// labelSetToFastFingerprint works similarly to labelSetToFingerprint but uses a
+// faster and less allocation-heavy hash function, which is more susceptible to
+// hash collisions. Therefore, collision detection should be applied.
+func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
+	if len(ls) == 0 {
+		return Fingerprint(emptyLabelSignature)
+	}
+
+	var result uint64
+	for labelName, labelValue := range ls {
+		sum := hashNew()
+		sum = hashAdd(sum, string(labelName))
+		sum = hashAddByte(sum, SeparatorByte)
+		sum = hashAdd(sum, string(labelValue))
+		result ^= sum
+	}
+	return Fingerprint(result)
+}
+
+// SignatureForLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and only includes the labels with the
+// specified LabelNames into the signature calculation. The labels passed in
+// will be sorted by this function.
+func SignatureForLabels(m Metric, labels ...LabelName) uint64 {
+	if len(labels) == 0 {
+		return emptyLabelSignature
+	}
+
+	sort.Sort(LabelNames(labels))
+
+	sum := hashNew()
+	for _, label := range labels {
+		sum = hashAdd(sum, string(label))
+		sum = hashAddByte(sum, SeparatorByte)
+		sum = hashAdd(sum, string(m[label]))
+		sum = hashAddByte(sum, SeparatorByte)
+	}
+	return sum
+}
+
+// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and excludes the labels with any of the
+// specified LabelNames from the signature calculation.
+func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 {
+	if len(m) == 0 {
+		return emptyLabelSignature
+	}
+
+	labelNames := make(LabelNames, 0, len(m))
+	for labelName := range m {
+		if _, exclude := labels[labelName]; !exclude {
+			labelNames = append(labelNames, labelName)
+		}
+	}
+	if len(labelNames) == 0 {
+		return emptyLabelSignature
+	}
+	sort.Sort(labelNames)
+
+	sum := hashNew()
+	for _, labelName := range labelNames {
+		sum = hashAdd(sum, string(labelName))
+		sum = hashAddByte(sum, SeparatorByte)
+		sum = hashAdd(sum, string(m[labelName]))
+		sum = hashAddByte(sum, SeparatorByte)
+	}
+	return sum
+}
diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go
new file mode 100644
index 00000000..7538e299
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/silence.go
@@ -0,0 +1,106 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"encoding/json"
+	"fmt"
+	"regexp"
+	"time"
+)
+
+// Matcher describes a match on the value of a given label.
+type Matcher struct {
+	Name    LabelName `json:"name"`
+	Value   string    `json:"value"`
+	IsRegex bool      `json:"isRegex"`
+}
+
+func (m *Matcher) UnmarshalJSON(b []byte) error {
+	type plain Matcher
+	if err := json.Unmarshal(b, (*plain)(m)); err != nil {
+		return err
+	}
+
+	if len(m.Name) == 0 {
+		return fmt.Errorf("label name in matcher must not be empty")
+	}
+	if m.IsRegex {
+		if _, err := regexp.Compile(m.Value); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Validate checks whether all fields of the matcher have valid values.
+func (m *Matcher) Validate() error {
+	if !m.Name.IsValid() {
+		return fmt.Errorf("invalid name %q", m.Name)
+	}
+	if m.IsRegex {
+		if _, err := regexp.Compile(m.Value); err != nil {
+			return fmt.Errorf("invalid regular expression %q", m.Value)
+		}
+	} else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 {
+		return fmt.Errorf("invalid value %q", m.Value)
+	}
+	return nil
+}
+
+// Silence defines the representation of a silence definition
+// in the Prometheus eco-system.
+type Silence struct {
+	ID uint64 `json:"id,omitempty"`
+
+	Matchers []*Matcher `json:"matchers"`
+
+	StartsAt time.Time `json:"startsAt"`
+	EndsAt   time.Time `json:"endsAt"`
+
+	CreatedAt time.Time `json:"createdAt,omitempty"`
+	CreatedBy string    `json:"createdBy"`
+	Comment   string    `json:"comment,omitempty"`
+}
+
+// Validate checks whether all fields of the silence have valid values.
+func (s *Silence) Validate() error {
+	if len(s.Matchers) == 0 {
+		return fmt.Errorf("at least one matcher required")
+	}
+	for _, m := range s.Matchers {
+		if err := m.Validate(); err != nil {
+			return fmt.Errorf("invalid matcher: %s", err)
+		}
+	}
+	if s.StartsAt.IsZero() {
+		return fmt.Errorf("start time missing")
+	}
+	if s.EndsAt.IsZero() {
+		return fmt.Errorf("end time missing")
+	}
+	if s.EndsAt.Before(s.StartsAt) {
+		return fmt.Errorf("start time must be before end time")
+	}
+	if s.CreatedBy == "" {
+		return fmt.Errorf("creator information missing")
+	}
+	if s.Comment == "" {
+		return fmt.Errorf("comment missing")
+	}
+	if s.CreatedAt.IsZero() {
+		return fmt.Errorf("creation timestamp missing")
+	}
+	return nil
+}
diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go
new file mode 100644
index 00000000..74ed5a9f
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/time.go
@@ -0,0 +1,264 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"fmt"
+	"math"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+)
+
+const (
+	// MinimumTick is the minimum supported time resolution. This has to
+	// divide time.Second evenly in order for the code below to work.
+	minimumTick = time.Millisecond
+	// second is the Time duration equivalent to one second.
+	second = int64(time.Second / minimumTick)
+	// The number of nanoseconds per minimum tick.
+	nanosPerTick = int64(minimumTick / time.Nanosecond)
+
+	// Earliest is the earliest Time representable. Handy for
+	// initializing a high watermark.
+	Earliest = Time(math.MinInt64)
+	// Latest is the latest Time representable. Handy for initializing
+	// a low watermark.
+	Latest = Time(math.MaxInt64)
+)
+
+// Time is the number of milliseconds since the epoch
+// (1970-01-01 00:00 UTC) excluding leap seconds.
+type Time int64
+
+// Interval describes an interval between two timestamps.
+type Interval struct {
+	Start, End Time
+}
+
+// Now returns the current time as a Time.
+func Now() Time {
+	return TimeFromUnixNano(time.Now().UnixNano())
+}
+
+// TimeFromUnix returns the Time equivalent to the Unix Time t
+// provided in seconds.
+func TimeFromUnix(t int64) Time {
+	return Time(t * second)
+}
+
+// TimeFromUnixNano returns the Time equivalent to the Unix Time
+// t provided in nanoseconds.
+func TimeFromUnixNano(t int64) Time {
+	return Time(t / nanosPerTick)
+}
+
+// Equal reports whether two Times represent the same instant.
+func (t Time) Equal(o Time) bool {
+	return t == o
+}
+
+// Before reports whether the Time t is before o.
+func (t Time) Before(o Time) bool {
+	return t < o
+}
+
+// After reports whether the Time t is after o.
+func (t Time) After(o Time) bool {
+	return t > o
+}
+
+// Add returns the Time t + d.
+func (t Time) Add(d time.Duration) Time {
+	return t + Time(d/minimumTick)
+}
+
+// Sub returns the Duration t - o.
+func (t Time) Sub(o Time) time.Duration { + return time.Duration(t-o) * minimumTick +} + +// Time returns the time.Time representation of t. +func (t Time) Time() time.Time { + return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick) +} + +// Unix returns t as a Unix time, the number of seconds elapsed +// since January 1, 1970 UTC. +func (t Time) Unix() int64 { + return int64(t) / second +} + +// UnixNano returns t as a Unix time, the number of nanoseconds elapsed +// since January 1, 1970 UTC. +func (t Time) UnixNano() int64 { + return int64(t) * nanosPerTick +} + +// The number of digits after the dot. +var dotPrecision = int(math.Log10(float64(second))) + +// String returns a string representation of the Time. +func (t Time) String() string { + return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64) +} + +// MarshalJSON implements the json.Marshaler interface. +func (t Time) MarshalJSON() ([]byte, error) { + return []byte(t.String()), nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (t *Time) UnmarshalJSON(b []byte) error { + p := strings.Split(string(b), ".") + switch len(p) { + case 1: + v, err := strconv.ParseInt(string(p[0]), 10, 64) + if err != nil { + return err + } + *t = Time(v * second) + + case 2: + v, err := strconv.ParseInt(string(p[0]), 10, 64) + if err != nil { + return err + } + v *= second + + prec := dotPrecision - len(p[1]) + if prec < 0 { + p[1] = p[1][:dotPrecision] + } else if prec > 0 { + p[1] = p[1] + strings.Repeat("0", prec) + } + + va, err := strconv.ParseInt(p[1], 10, 32) + if err != nil { + return err + } + + *t = Time(v + va) + + default: + return fmt.Errorf("invalid time %q", string(b)) + } + return nil +} + +// Duration wraps time.Duration. It is used to parse the custom duration format +// from YAML. +// This type should not propagate beyond the scope of input/output processing. +type Duration time.Duration + +// Set implements pflag/flag.Value +func (d *Duration) Set(s string) error { + var err error + *d, err = ParseDuration(s) + return err +} + +// Type implements pflag.Value +func (d *Duration) Type() string { + return "duration" +} + +var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$") + +// ParseDuration parses a string into a time.Duration, assuming that a year +// always has 365d, a week always has 7d, and a day always has 24h. 
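+//
+// For example (illustrative):
+//
+//	d, err := ParseDuration("2w") // 2 weeks, i.e. 336h as a time.Duration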
+func ParseDuration(durationStr string) (Duration, error) { + matches := durationRE.FindStringSubmatch(durationStr) + if len(matches) != 3 { + return 0, fmt.Errorf("not a valid duration string: %q", durationStr) + } + var ( + n, _ = strconv.Atoi(matches[1]) + dur = time.Duration(n) * time.Millisecond + ) + switch unit := matches[2]; unit { + case "y": + dur *= 1000 * 60 * 60 * 24 * 365 + case "w": + dur *= 1000 * 60 * 60 * 24 * 7 + case "d": + dur *= 1000 * 60 * 60 * 24 + case "h": + dur *= 1000 * 60 * 60 + case "m": + dur *= 1000 * 60 + case "s": + dur *= 1000 + case "ms": + // Value already correct + default: + return 0, fmt.Errorf("invalid time unit in duration string: %q", unit) + } + return Duration(dur), nil +} + +func (d Duration) String() string { + var ( + ms = int64(time.Duration(d) / time.Millisecond) + unit = "ms" + ) + if ms == 0 { + return "0s" + } + factors := map[string]int64{ + "y": 1000 * 60 * 60 * 24 * 365, + "w": 1000 * 60 * 60 * 24 * 7, + "d": 1000 * 60 * 60 * 24, + "h": 1000 * 60 * 60, + "m": 1000 * 60, + "s": 1000, + "ms": 1, + } + + switch int64(0) { + case ms % factors["y"]: + unit = "y" + case ms % factors["w"]: + unit = "w" + case ms % factors["d"]: + unit = "d" + case ms % factors["h"]: + unit = "h" + case ms % factors["m"]: + unit = "m" + case ms % factors["s"]: + unit = "s" + } + return fmt.Sprintf("%v%v", ms/factors[unit], unit) +} + +// MarshalYAML implements the yaml.Marshaler interface. +func (d Duration) MarshalYAML() (interface{}, error) { + return d.String(), nil +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + if err := unmarshal(&s); err != nil { + return err + } + dur, err := ParseDuration(s) + if err != nil { + return err + } + *d = dur + return nil +} diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go new file mode 100644 index 00000000..c9ed3ffd --- /dev/null +++ b/vendor/github.com/prometheus/common/model/value.go @@ -0,0 +1,416 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "math" + "sort" + "strconv" + "strings" +) + +var ( + // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a + // non-existing sample pair. It is a SamplePair with timestamp Earliest and + // value 0.0. Note that the natural zero value of SamplePair has a timestamp + // of 0, which is possible to appear in a real SamplePair and thus not + // suitable to signal a non-existing SamplePair. + ZeroSamplePair = SamplePair{Timestamp: Earliest} + + // ZeroSample is the pseudo zero-value of Sample used to signal a + // non-existing sample. It is a Sample with timestamp Earliest, value 0.0, + // and metric nil. Note that the natural zero value of Sample has a timestamp + // of 0, which is possible to appear in a real Sample and thus not suitable + // to signal a non-existing Sample. 
+	ZeroSample = Sample{Timestamp: Earliest}
+)
+
+// A SampleValue is a representation of a value for a given sample at a given
+// time.
+type SampleValue float64
+
+// MarshalJSON implements json.Marshaler.
+func (v SampleValue) MarshalJSON() ([]byte, error) {
+	return json.Marshal(v.String())
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (v *SampleValue) UnmarshalJSON(b []byte) error {
+	if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
+		return fmt.Errorf("sample value must be a quoted string")
+	}
+	f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
+	if err != nil {
+		return err
+	}
+	*v = SampleValue(f)
+	return nil
+}
+
+// Equal returns true if the value of v and o is equal or if both are NaN. Note
+// that v==o is false if both are NaN. If you want the conventional float
+// behavior, use == to compare two SampleValues.
+func (v SampleValue) Equal(o SampleValue) bool {
+	if v == o {
+		return true
+	}
+	return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
+}
+
+func (v SampleValue) String() string {
+	return strconv.FormatFloat(float64(v), 'f', -1, 64)
+}
+
+// SamplePair pairs a SampleValue with a Timestamp.
+type SamplePair struct {
+	Timestamp Time
+	Value     SampleValue
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s SamplePair) MarshalJSON() ([]byte, error) {
+	t, err := json.Marshal(s.Timestamp)
+	if err != nil {
+		return nil, err
+	}
+	v, err := json.Marshal(s.Value)
+	if err != nil {
+		return nil, err
+	}
+	return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *SamplePair) UnmarshalJSON(b []byte) error {
+	v := [...]json.Unmarshaler{&s.Timestamp, &s.Value}
+	return json.Unmarshal(b, &v)
+}
+
+// Equal returns true if this SamplePair and o have equal Values and equal
+// Timestamps. The semantics of Value equality are defined by SampleValue.Equal.
+func (s *SamplePair) Equal(o *SamplePair) bool {
+	return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
+}
+
+func (s SamplePair) String() string {
+	return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp)
+}
+
+// Sample is a sample pair associated with a metric.
+type Sample struct {
+	Metric    Metric      `json:"metric"`
+	Value     SampleValue `json:"value"`
+	Timestamp Time        `json:"timestamp"`
+}
+
+// Equal compares first the metrics, then the timestamp, then the value. The
+// semantics of value equality are defined by SampleValue.Equal.
+func (s *Sample) Equal(o *Sample) bool {
+	if s == o {
+		return true
+	}
+
+	if !s.Metric.Equal(o.Metric) {
+		return false
+	}
+	if !s.Timestamp.Equal(o.Timestamp) {
+		return false
+	}
+
+	return s.Value.Equal(o.Value)
+}
+
+func (s Sample) String() string {
+	return fmt.Sprintf("%s => %s", s.Metric, SamplePair{
+		Timestamp: s.Timestamp,
+		Value:     s.Value,
+	})
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s Sample) MarshalJSON() ([]byte, error) {
+	v := struct {
+		Metric Metric     `json:"metric"`
+		Value  SamplePair `json:"value"`
+	}{
+		Metric: s.Metric,
+		Value: SamplePair{
+			Timestamp: s.Timestamp,
+			Value:     s.Value,
+		},
+	}
+
+	return json.Marshal(&v)
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
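+//
+// A sample is encoded as a JSON object with a metric and a [timestamp,
+// "value"] pair, e.g. (illustrative values):
+//
+//	{"metric":{"__name__":"up"},"value":[1435781451.781,"1"]}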
+func (s *Sample) UnmarshalJSON(b []byte) error {
+	v := struct {
+		Metric Metric     `json:"metric"`
+		Value  SamplePair `json:"value"`
+	}{
+		Metric: s.Metric,
+		Value: SamplePair{
+			Timestamp: s.Timestamp,
+			Value:     s.Value,
+		},
+	}
+
+	if err := json.Unmarshal(b, &v); err != nil {
+		return err
+	}
+
+	s.Metric = v.Metric
+	s.Timestamp = v.Value.Timestamp
+	s.Value = v.Value.Value
+
+	return nil
+}
+
+// Samples is a sortable Sample slice. It implements sort.Interface.
+type Samples []*Sample
+
+func (s Samples) Len() int {
+	return len(s)
+}
+
+// Less compares first the metrics, then the timestamp.
+func (s Samples) Less(i, j int) bool {
+	switch {
+	case s[i].Metric.Before(s[j].Metric):
+		return true
+	case s[j].Metric.Before(s[i].Metric):
+		return false
+	case s[i].Timestamp.Before(s[j].Timestamp):
+		return true
+	default:
+		return false
+	}
+}
+
+func (s Samples) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+// Equal compares two sets of samples and returns true if they are equal.
+func (s Samples) Equal(o Samples) bool {
+	if len(s) != len(o) {
+		return false
+	}
+
+	for i, sample := range s {
+		if !sample.Equal(o[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+// SampleStream is a stream of Values belonging to an attached Metric.
+type SampleStream struct {
+	Metric Metric       `json:"metric"`
+	Values []SamplePair `json:"values"`
+}
+
+func (ss SampleStream) String() string {
+	vals := make([]string, len(ss.Values))
+	for i, v := range ss.Values {
+		vals[i] = v.String()
+	}
+	return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n"))
+}
+
+// Value is a generic interface for values resulting from a query evaluation.
+type Value interface {
+	Type() ValueType
+	String() string
+}
+
+func (Matrix) Type() ValueType  { return ValMatrix }
+func (Vector) Type() ValueType  { return ValVector }
+func (*Scalar) Type() ValueType { return ValScalar }
+func (*String) Type() ValueType { return ValString }
+
+// ValueType indicates the type of a query result Value.
+type ValueType int
+
+const (
+	ValNone ValueType = iota
+	ValScalar
+	ValVector
+	ValMatrix
+	ValString
+)
+
+// MarshalJSON implements json.Marshaler.
+func (et ValueType) MarshalJSON() ([]byte, error) {
+	return json.Marshal(et.String())
+}
+
+func (et *ValueType) UnmarshalJSON(b []byte) error {
+	var s string
+	if err := json.Unmarshal(b, &s); err != nil {
+		return err
+	}
+	switch s {
+	case "":
+		*et = ValNone
+	case "scalar":
+		*et = ValScalar
+	case "vector":
+		*et = ValVector
+	case "matrix":
+		*et = ValMatrix
+	case "string":
+		*et = ValString
+	default:
+		return fmt.Errorf("unknown value type %q", s)
+	}
+	return nil
+}
+
+func (e ValueType) String() string {
+	switch e {
+	case ValNone:
+		return ""
+	case ValScalar:
+		return "scalar"
+	case ValVector:
+		return "vector"
+	case ValMatrix:
+		return "matrix"
+	case ValString:
+		return "string"
+	}
+	panic("ValueType.String: unhandled value type")
+}
+
+// Scalar is a scalar value evaluated at the set timestamp.
+type Scalar struct {
+	Value     SampleValue `json:"value"`
+	Timestamp Time        `json:"timestamp"`
+}
+
+func (s Scalar) String() string {
+	return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp)
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s Scalar) MarshalJSON() ([]byte, error) {
+	v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64)
+	return json.Marshal([...]interface{}{s.Timestamp, string(v)})
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *Scalar) UnmarshalJSON(b []byte) error { + var f string + v := [...]interface{}{&s.Timestamp, &f} + + if err := json.Unmarshal(b, &v); err != nil { + return err + } + + value, err := strconv.ParseFloat(f, 64) + if err != nil { + return fmt.Errorf("error parsing sample value: %s", err) + } + s.Value = SampleValue(value) + return nil +} + +// String is a string value evaluated at the set timestamp. +type String struct { + Value string `json:"value"` + Timestamp Time `json:"timestamp"` +} + +func (s *String) String() string { + return s.Value +} + +// MarshalJSON implements json.Marshaler. +func (s String) MarshalJSON() ([]byte, error) { + return json.Marshal([]interface{}{s.Timestamp, s.Value}) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *String) UnmarshalJSON(b []byte) error { + v := [...]interface{}{&s.Timestamp, &s.Value} + return json.Unmarshal(b, &v) +} + +// Vector is basically only an alias for Samples, but the +// contract is that in a Vector, all Samples have the same timestamp. +type Vector []*Sample + +func (vec Vector) String() string { + entries := make([]string, len(vec)) + for i, s := range vec { + entries[i] = s.String() + } + return strings.Join(entries, "\n") +} + +func (vec Vector) Len() int { return len(vec) } +func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] } + +// Less compares first the metrics, then the timestamp. +func (vec Vector) Less(i, j int) bool { + switch { + case vec[i].Metric.Before(vec[j].Metric): + return true + case vec[j].Metric.Before(vec[i].Metric): + return false + case vec[i].Timestamp.Before(vec[j].Timestamp): + return true + default: + return false + } +} + +// Equal compares two sets of samples and returns true if they are equal. +func (vec Vector) Equal(o Vector) bool { + if len(vec) != len(o) { + return false + } + + for i, sample := range vec { + if !sample.Equal(o[i]) { + return false + } + } + return true +} + +// Matrix is a list of time series. +type Matrix []*SampleStream + +func (m Matrix) Len() int { return len(m) } +func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) } +func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] } + +func (mat Matrix) String() string { + matCp := make(Matrix, len(mat)) + copy(matCp, mat) + sort.Sort(matCp) + + strs := make([]string, len(matCp)) + + for i, ss := range matCp { + strs[i] = ss.String() + } + + return strings.Join(strs, "\n") +} diff --git a/vendor/github.com/prometheus/procfs/LICENSE b/vendor/github.com/prometheus/procfs/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/prometheus/procfs/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/prometheus/procfs/NOTICE b/vendor/github.com/prometheus/procfs/NOTICE
new file mode 100644
index 00000000..53c5e9aa
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/NOTICE
@@ -0,0 +1,7 @@
+procfs provides functions to retrieve system, kernel and process
+metrics from the pseudo-filesystem proc.
+
+Copyright 2014-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md
new file mode 100644
index 00000000..20954947
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/README.md
@@ -0,0 +1,11 @@
+# procfs
+
+This procfs package provides functions to retrieve system, kernel and process
+metrics from the pseudo-filesystem proc.
+
+*WARNING*: This package is a work in progress. Its API may still break in
+backwards-incompatible ways without warnings. Use it at your own risk.
+
+[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs)
+[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs)
+[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs)
diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go
new file mode 100644
index 00000000..d3a82680
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/buddyinfo.go
@@ -0,0 +1,95 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// A BuddyInfo is the details parsed from /proc/buddyinfo.
+// The data consists of an array of free fragments of each size.
+// The sizes are 2^n*PAGE_SIZE, where n is the array index.
+type BuddyInfo struct {
+	Node  string
+	Zone  string
+	Sizes []float64
+}
+
+// NewBuddyInfo reads the buddyinfo statistics.
+func NewBuddyInfo() ([]BuddyInfo, error) {
+	fs, err := NewFS(DefaultMountPoint)
+	if err != nil {
+		return nil, err
+	}
+
+	return fs.NewBuddyInfo()
+}
+
+// NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.
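+//
+// A usage sketch (illustrative):
+//
+//	fs, _ := NewFS(DefaultMountPoint)
+//	info, _ := fs.NewBuddyInfo()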
+func (fs FS) NewBuddyInfo() ([]BuddyInfo, error) { + file, err := os.Open(fs.Path("buddyinfo")) + if err != nil { + return nil, err + } + defer file.Close() + + return parseBuddyInfo(file) +} + +func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { + var ( + buddyInfo = []BuddyInfo{} + scanner = bufio.NewScanner(r) + bucketCount = -1 + ) + + for scanner.Scan() { + var err error + line := scanner.Text() + parts := strings.Fields(line) + + if len(parts) < 4 { + return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo") + } + + node := strings.TrimRight(parts[1], ",") + zone := strings.TrimRight(parts[3], ",") + arraySize := len(parts[4:]) + + if bucketCount == -1 { + bucketCount = arraySize + } else { + if bucketCount != arraySize { + return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize) + } + } + + sizes := make([]float64, arraySize) + for i := 0; i < arraySize; i++ { + sizes[i], err = strconv.ParseFloat(parts[i+4], 64) + if err != nil { + return nil, fmt.Errorf("invalid value in buddyinfo: %s", err) + } + } + + buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes}) + } + + return buddyInfo, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go new file mode 100644 index 00000000..e2acd6d4 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/doc.go @@ -0,0 +1,45 @@ +// Copyright 2014 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package procfs provides functions to retrieve system, kernel and process +// metrics from the pseudo-filesystem proc. +// +// Example: +// +// package main +// +// import ( +// "fmt" +// "log" +// +// "github.com/prometheus/procfs" +// ) +// +// func main() { +// p, err := procfs.Self() +// if err != nil { +// log.Fatalf("could not get process: %s", err) +// } +// +// stat, err := p.NewStat() +// if err != nil { +// log.Fatalf("could not get process stat: %s", err) +// } +// +// fmt.Printf("command: %s\n", stat.Comm) +// fmt.Printf("cpu time: %fs\n", stat.CPUTime()) +// fmt.Printf("vsize: %dB\n", stat.VirtualMemory()) +// fmt.Printf("rss: %dB\n", stat.ResidentMemory()) +// } +// +package procfs diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go new file mode 100644 index 00000000..36c1586a --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fs.go @@ -0,0 +1,69 @@ +package procfs + +import ( + "fmt" + "os" + "path" + + "github.com/prometheus/procfs/nfs" + "github.com/prometheus/procfs/xfs" +) + +// FS represents the pseudo-filesystem proc, which provides an interface to +// kernel data structures. +type FS string + +// DefaultMountPoint is the common mount point of the proc filesystem. +const DefaultMountPoint = "/proc" + +// NewFS returns a new FS mounted under the given mountPoint. It will error +// if the mount point can't be read. 
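+//
+// For example (illustrative):
+//
+//	fs, err := NewFS("/proc")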
+func NewFS(mountPoint string) (FS, error) {
+	info, err := os.Stat(mountPoint)
+	if err != nil {
+		return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
+	}
+	if !info.IsDir() {
+		return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
+	}
+
+	return FS(mountPoint), nil
+}
+
+// Path returns the path of the given subsystem relative to the procfs root.
+func (fs FS) Path(p ...string) string {
+	return path.Join(append([]string{string(fs)}, p...)...)
+}
+
+// XFSStats retrieves XFS filesystem runtime statistics.
+func (fs FS) XFSStats() (*xfs.Stats, error) {
+	f, err := os.Open(fs.Path("fs/xfs/stat"))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	return xfs.ParseStats(f)
+}
+
+// NFSdClientRPCStats retrieves NFS client RPC statistics.
+func (fs FS) NFSdClientRPCStats() (*nfs.ClientRPCStats, error) {
+	f, err := os.Open(fs.Path("net/rpc/nfs"))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	return nfs.ParseClientRPCStats(f)
+}
+
+// NFSdServerRPCStats retrieves NFS daemon RPC statistics.
+func (fs FS) NFSdServerRPCStats() (*nfs.ServerRPCStats, error) {
+	f, err := os.Open(fs.Path("net/rpc/nfsd"))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	return nfs.ParseServerRPCStats(f)
+}
diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go
new file mode 100644
index 00000000..1ad21c91
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go
@@ -0,0 +1,46 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import "strconv"
+
+// ParseUint32s parses a slice of strings into a slice of uint32s.
+func ParseUint32s(ss []string) ([]uint32, error) {
+	us := make([]uint32, 0, len(ss))
+	for _, s := range ss {
+		u, err := strconv.ParseUint(s, 10, 32)
+		if err != nil {
+			return nil, err
+		}
+
+		us = append(us, uint32(u))
+	}
+
+	return us, nil
+}
+
+// ParseUint64s parses a slice of strings into a slice of uint64s.
+func ParseUint64s(ss []string) ([]uint64, error) {
+	us := make([]uint64, 0, len(ss))
+	for _, s := range ss {
+		u, err := strconv.ParseUint(s, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+
+		us = append(us, u)
+	}
+
+	return us, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go
new file mode 100644
index 00000000..5761b457
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/ipvs.go
@@ -0,0 +1,246 @@
+package procfs
+
+import (
+	"bufio"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`.
+type IPVSStats struct {
+	// Total count of connections.
+	Connections uint64
+	// Total incoming packets processed.
+	IncomingPackets uint64
+	// Total outgoing packets processed.
+	OutgoingPackets uint64
+	// Total incoming traffic.
+ IncomingBytes uint64 + // Total outgoing traffic. + OutgoingBytes uint64 +} + +// IPVSBackendStatus holds current metrics of one virtual / real address pair. +type IPVSBackendStatus struct { + // The local (virtual) IP address. + LocalAddress net.IP + // The remote (real) IP address. + RemoteAddress net.IP + // The local (virtual) port. + LocalPort uint16 + // The remote (real) port. + RemotePort uint16 + // The local firewall mark + LocalMark string + // The transport protocol (TCP, UDP). + Proto string + // The current number of active connections for this virtual/real address pair. + ActiveConn uint64 + // The current number of inactive connections for this virtual/real address pair. + InactConn uint64 + // The current weight of this virtual/real address pair. + Weight uint64 +} + +// NewIPVSStats reads the IPVS statistics. +func NewIPVSStats() (IPVSStats, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return IPVSStats{}, err + } + + return fs.NewIPVSStats() +} + +// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem. +func (fs FS) NewIPVSStats() (IPVSStats, error) { + file, err := os.Open(fs.Path("net/ip_vs_stats")) + if err != nil { + return IPVSStats{}, err + } + defer file.Close() + + return parseIPVSStats(file) +} + +// parseIPVSStats performs the actual parsing of `ip_vs_stats`. +func parseIPVSStats(file io.Reader) (IPVSStats, error) { + var ( + statContent []byte + statLines []string + statFields []string + stats IPVSStats + ) + + statContent, err := ioutil.ReadAll(file) + if err != nil { + return IPVSStats{}, err + } + + statLines = strings.SplitN(string(statContent), "\n", 4) + if len(statLines) != 4 { + return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short") + } + + statFields = strings.Fields(statLines[2]) + if len(statFields) != 5 { + return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields") + } + + stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64) + if err != nil { + return IPVSStats{}, err + } + + return stats, nil +} + +// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs. +func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return []IPVSBackendStatus{}, err + } + + return fs.NewIPVSBackendStatus() +} + +// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem. 
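+//
+// A usage sketch (illustrative):
+//
+//	fs, _ := NewFS(DefaultMountPoint)
+//	backends, _ := fs.NewIPVSBackendStatus()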
+func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { + file, err := os.Open(fs.Path("net/ip_vs")) + if err != nil { + return nil, err + } + defer file.Close() + + return parseIPVSBackendStatus(file) +} + +func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) { + var ( + status []IPVSBackendStatus + scanner = bufio.NewScanner(file) + proto string + localMark string + localAddress net.IP + localPort uint16 + err error + ) + + for scanner.Scan() { + fields := strings.Fields(scanner.Text()) + if len(fields) == 0 { + continue + } + switch { + case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port": + continue + case fields[0] == "TCP" || fields[0] == "UDP": + if len(fields) < 2 { + continue + } + proto = fields[0] + localMark = "" + localAddress, localPort, err = parseIPPort(fields[1]) + if err != nil { + return nil, err + } + case fields[0] == "FWM": + if len(fields) < 2 { + continue + } + proto = fields[0] + localMark = fields[1] + localAddress = nil + localPort = 0 + case fields[0] == "->": + if len(fields) < 6 { + continue + } + remoteAddress, remotePort, err := parseIPPort(fields[1]) + if err != nil { + return nil, err + } + weight, err := strconv.ParseUint(fields[3], 10, 64) + if err != nil { + return nil, err + } + activeConn, err := strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, err + } + inactConn, err := strconv.ParseUint(fields[5], 10, 64) + if err != nil { + return nil, err + } + status = append(status, IPVSBackendStatus{ + LocalAddress: localAddress, + LocalPort: localPort, + LocalMark: localMark, + RemoteAddress: remoteAddress, + RemotePort: remotePort, + Proto: proto, + Weight: weight, + ActiveConn: activeConn, + InactConn: inactConn, + }) + } + } + return status, nil +} + +func parseIPPort(s string) (net.IP, uint16, error) { + var ( + ip net.IP + err error + ) + + switch len(s) { + case 13: + ip, err = hex.DecodeString(s[0:8]) + if err != nil { + return nil, 0, err + } + case 46: + ip = net.ParseIP(s[1:40]) + if ip == nil { + return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40]) + } + default: + return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s) + } + + portString := s[len(s)-4:] + if len(portString) != 4 { + return nil, 0, fmt.Errorf("unexpected port string format: %s", portString) + } + port, err := strconv.ParseUint(portString, 16, 16) + if err != nil { + return nil, 0, err + } + + return ip, uint16(port), nil +} diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go new file mode 100644 index 00000000..d7a248c0 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -0,0 +1,138 @@ +package procfs + +import ( + "fmt" + "io/ioutil" + "regexp" + "strconv" + "strings" +) + +var ( + statuslineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`) + buildlineRE = regexp.MustCompile(`\((\d+)/\d+\)`) +) + +// MDStat holds info parsed from /proc/mdstat. +type MDStat struct { + // Name of the device. + Name string + // activity-state of the device. + ActivityState string + // Number of active disks. + DisksActive int64 + // Total number of disks the device consists of. + DisksTotal int64 + // Number of blocks the device holds. + BlocksTotal int64 + // Number of blocks on the device that are in sync. + BlocksSynced int64 +} + +// ParseMDStat parses an mdstat-file and returns a struct with the relevant infos. 
+func (fs FS) ParseMDStat() (mdstates []MDStat, err error) { + mdStatusFilePath := fs.Path("mdstat") + content, err := ioutil.ReadFile(mdStatusFilePath) + if err != nil { + return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) + } + + mdStates := []MDStat{} + lines := strings.Split(string(content), "\n") + for i, l := range lines { + if l == "" { + continue + } + if l[0] == ' ' { + continue + } + if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") { + continue + } + + mainLine := strings.Split(l, " ") + if len(mainLine) < 3 { + return mdStates, fmt.Errorf("error parsing mdline: %s", l) + } + mdName := mainLine[0] + activityState := mainLine[2] + + if len(lines) <= i+3 { + return mdStates, fmt.Errorf( + "error parsing %s: too few lines for md device %s", + mdStatusFilePath, + mdName, + ) + } + + active, total, size, err := evalStatusline(lines[i+1]) + if err != nil { + return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) + } + + // j is the line number of the syncing-line. + j := i + 2 + if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line + j = i + 3 + } + + // If device is syncing at the moment, get the number of currently + // synced bytes, otherwise that number equals the size of the device. + syncedBlocks := size + if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") { + syncedBlocks, err = evalBuildline(lines[j]) + if err != nil { + return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) + } + } + + mdStates = append(mdStates, MDStat{ + Name: mdName, + ActivityState: activityState, + DisksActive: active, + DisksTotal: total, + BlocksTotal: size, + BlocksSynced: syncedBlocks, + }) + } + + return mdStates, nil +} + +func evalStatusline(statusline string) (active, total, size int64, err error) { + matches := statuslineRE.FindStringSubmatch(statusline) + if len(matches) != 4 { + return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline) + } + + size, err = strconv.ParseInt(matches[1], 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + } + + total, err = strconv.ParseInt(matches[2], 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + } + + active, err = strconv.ParseInt(matches[3], 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + } + + return active, total, size, nil +} + +func evalBuildline(buildline string) (syncedBlocks int64, err error) { + matches := buildlineRE.FindStringSubmatch(buildline) + if len(matches) != 2 { + return 0, fmt.Errorf("unexpected buildline: %s", buildline) + } + + syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) + if err != nil { + return 0, fmt.Errorf("%s in buildline: %s", err, buildline) + } + + return syncedBlocks, nil +} diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go new file mode 100644 index 00000000..6b2b0ba9 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -0,0 +1,556 @@ +package procfs + +// While implementing parsing of /proc/[pid]/mountstats, this blog was used +// heavily as a reference: +// https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex +// +// Special thanks to Chris Siebenmann for all of his posts explaining the +// various statistics available for NFS. 
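+//
+// For orientation, a device entry in mountstats looks like the following
+// (host and paths are made-up examples; NFS entries additionally carry a
+// statvers field and the per-mount statistics parsed below):
+//
+//	device fs.example.com:/export mounted on /mnt/nfs with fstype nfs4 statvers=1.1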
+ +import ( + "bufio" + "fmt" + "io" + "strconv" + "strings" + "time" +) + +// Constants shared between multiple functions. +const ( + deviceEntryLen = 8 + + fieldBytesLen = 8 + fieldEventsLen = 27 + + statVersion10 = "1.0" + statVersion11 = "1.1" + + fieldTransport10Len = 10 + fieldTransport11Len = 13 +) + +// A Mount is a device mount parsed from /proc/[pid]/mountstats. +type Mount struct { + // Name of the device. + Device string + // The mount point of the device. + Mount string + // The filesystem type used by the device. + Type string + // If available additional statistics related to this Mount. + // Use a type assertion to determine if additional statistics are available. + Stats MountStats +} + +// A MountStats is a type which contains detailed statistics for a specific +// type of Mount. +type MountStats interface { + mountStats() +} + +// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts. +type MountStatsNFS struct { + // The version of statistics provided. + StatVersion string + // The age of the NFS mount. + Age time.Duration + // Statistics related to byte counters for various operations. + Bytes NFSBytesStats + // Statistics related to various NFS event occurrences. + Events NFSEventsStats + // Statistics broken down by filesystem operation. + Operations []NFSOperationStats + // Statistics about the NFS RPC transport. + Transport NFSTransportStats +} + +// mountStats implements MountStats. +func (m MountStatsNFS) mountStats() {} + +// A NFSBytesStats contains statistics about the number of bytes read and written +// by an NFS client to and from an NFS server. +type NFSBytesStats struct { + // Number of bytes read using the read() syscall. + Read uint64 + // Number of bytes written using the write() syscall. + Write uint64 + // Number of bytes read using the read() syscall in O_DIRECT mode. + DirectRead uint64 + // Number of bytes written using the write() syscall in O_DIRECT mode. + DirectWrite uint64 + // Number of bytes read from the NFS server, in total. + ReadTotal uint64 + // Number of bytes written to the NFS server, in total. + WriteTotal uint64 + // Number of pages read directly via mmap()'d files. + ReadPages uint64 + // Number of pages written directly via mmap()'d files. + WritePages uint64 +} + +// A NFSEventsStats contains statistics about NFS event occurrences. +type NFSEventsStats struct { + // Number of times cached inode attributes are re-validated from the server. + InodeRevalidate uint64 + // Number of times cached dentry nodes are re-validated from the server. + DnodeRevalidate uint64 + // Number of times an inode cache is cleared. + DataInvalidate uint64 + // Number of times cached inode attributes are invalidated. + AttributeInvalidate uint64 + // Number of times files or directories have been open()'d. + VFSOpen uint64 + // Number of times a directory lookup has occurred. + VFSLookup uint64 + // Number of times permissions have been checked. + VFSAccess uint64 + // Number of updates (and potential writes) to pages. + VFSUpdatePage uint64 + // Number of pages read directly via mmap()'d files. + VFSReadPage uint64 + // Number of times a group of pages have been read. + VFSReadPages uint64 + // Number of pages written directly via mmap()'d files. + VFSWritePage uint64 + // Number of times a group of pages have been written. + VFSWritePages uint64 + // Number of times directory entries have been read with getdents(). + VFSGetdents uint64 + // Number of times attributes have been set on inodes. 
+	VFSSetattr uint64
+	// Number of pending writes that have been forcefully flushed to the server.
+	VFSFlush uint64
+	// Number of times fsync() has been called on directories and files.
+	VFSFsync uint64
+	// Number of times locking has been attempted on a file.
+	VFSLock uint64
+	// Number of times files have been closed and released.
+	VFSFileRelease uint64
+	// Unknown. Possibly unused.
+	CongestionWait uint64
+	// Number of times files have been truncated.
+	Truncation uint64
+	// Number of times a file has been grown due to writes beyond its existing end.
+	WriteExtension uint64
+	// Number of times a file was removed while still open by another process.
+	SillyRename uint64
+	// Number of times the NFS server gave less data than expected while reading.
+	ShortRead uint64
+	// Number of times the NFS server wrote less data than expected while writing.
+	ShortWrite uint64
+	// Number of times the NFS server indicated EJUKEBOX; retrieving data from
+	// offline storage.
+	JukeboxDelay uint64
+	// Number of NFS v4.1+ pNFS reads.
+	PNFSRead uint64
+	// Number of NFS v4.1+ pNFS writes.
+	PNFSWrite uint64
+}
+
+// A NFSOperationStats contains statistics for a single operation.
+type NFSOperationStats struct {
+	// The name of the operation.
+	Operation string
+	// Number of requests performed for this operation.
+	Requests uint64
+	// Number of times an actual RPC request has been transmitted for this operation.
+	Transmissions uint64
+	// Number of times a request has had a major timeout.
+	MajorTimeouts uint64
+	// Number of bytes sent for this operation, including RPC headers and payload.
+	BytesSent uint64
+	// Number of bytes received for this operation, including RPC headers and payload.
+	BytesReceived uint64
+	// Duration all requests spent queued for transmission before they were sent.
+	CumulativeQueueTime time.Duration
+	// Duration it took to get a reply back after the request was transmitted.
+	CumulativeTotalResponseTime time.Duration
+	// Duration from when a request was enqueued to when it was completely handled.
+	CumulativeTotalRequestTime time.Duration
+}
+
+// A NFSTransportStats contains statistics for the NFS mount RPC requests and
+// responses.
+type NFSTransportStats struct {
+	// The local port used for the NFS mount.
+	Port uint64
+	// Number of times the client has had to establish a connection from scratch
+	// to the NFS server.
+	Bind uint64
+	// Number of times the client has made a TCP connection to the NFS server.
+	Connect uint64
+	// Duration (in jiffies, a kernel internal unit of time) the NFS mount has
+	// spent waiting for connections to the server to be established.
+	ConnectIdleTime uint64
+	// Duration since the NFS mount last saw any RPC traffic.
+	IdleTime time.Duration
+	// Number of RPC requests for this mount sent to the NFS server.
+	Sends uint64
+	// Number of RPC responses for this mount received from the NFS server.
+	Receives uint64
+	// Number of times the NFS server sent a response with a transaction ID
+	// unknown to this client.
+	BadTransactionIDs uint64
+	// A running counter, incremented on each request as the current difference
+	// between sends and receives.
+	CumulativeActiveRequests uint64
+	// A running counter, incremented on each request by the current backlog
+	// queue size.
+	CumulativeBacklog uint64
+
+	// Stats below only available with stat version 1.1.
+
+	// Maximum number of simultaneously active RPC requests ever used.
+ MaximumRPCSlotsUsed uint64 + // A running counter, incremented on each request as the current size of the + // sending queue. + CumulativeSendingQueue uint64 + // A running counter, incremented on each request as the current size of the + // pending queue. + CumulativePendingQueue uint64 +} + +// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice +// of Mount structures containing detailed information about each mount. +// If available, statistics for each mount are parsed as well. +func parseMountStats(r io.Reader) ([]*Mount, error) { + const ( + device = "device" + statVersionPrefix = "statvers=" + + nfs3Type = "nfs" + nfs4Type = "nfs4" + ) + + var mounts []*Mount + + s := bufio.NewScanner(r) + for s.Scan() { + // Only look for device entries in this function + ss := strings.Fields(string(s.Bytes())) + if len(ss) == 0 || ss[0] != device { + continue + } + + m, err := parseMount(ss) + if err != nil { + return nil, err + } + + // Does this mount also possess statistics information? + if len(ss) > deviceEntryLen { + // Only NFSv3 and v4 are supported for parsing statistics + if m.Type != nfs3Type && m.Type != nfs4Type { + return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type) + } + + statVersion := strings.TrimPrefix(ss[8], statVersionPrefix) + + stats, err := parseMountStatsNFS(s, statVersion) + if err != nil { + return nil, err + } + + m.Stats = stats + } + + mounts = append(mounts, m) + } + + return mounts, s.Err() +} + +// parseMount parses an entry in /proc/[pid]/mountstats in the format: +// device [device] mounted on [mount] with fstype [type] +func parseMount(ss []string) (*Mount, error) { + if len(ss) < deviceEntryLen { + return nil, fmt.Errorf("invalid device entry: %v", ss) + } + + // Check for specific words appearing at specific indices to ensure + // the format is consistent with what we expect + format := []struct { + i int + s string + }{ + {i: 0, s: "device"}, + {i: 2, s: "mounted"}, + {i: 3, s: "on"}, + {i: 5, s: "with"}, + {i: 6, s: "fstype"}, + } + + for _, f := range format { + if ss[f.i] != f.s { + return nil, fmt.Errorf("invalid device entry: %v", ss) + } + } + + return &Mount{ + Device: ss[1], + Mount: ss[4], + Type: ss[7], + }, nil +} + +// parseMountStatsNFS parses a MountStatsNFS by scanning additional information +// related to NFS statistics. 
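+//
+// The per-mount lines consumed here look like the following (values are
+// made up; the bytes line carries 8 counters):
+//
+//	age: 1157
+//	bytes: 1207640230 0 0 0 1210214218 0 295483 0
+//	xprt: tcp 832 0 1 0 16 172 172 0 172 0 30 10 0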
+func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) { + // Field indicators for parsing specific types of data + const ( + fieldAge = "age:" + fieldBytes = "bytes:" + fieldEvents = "events:" + fieldPerOpStats = "per-op" + fieldTransport = "xprt:" + ) + + stats := &MountStatsNFS{ + StatVersion: statVersion, + } + + for s.Scan() { + ss := strings.Fields(string(s.Bytes())) + if len(ss) == 0 { + break + } + if len(ss) < 2 { + return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + } + + switch ss[0] { + case fieldAge: + // Age integer is in seconds + d, err := time.ParseDuration(ss[1] + "s") + if err != nil { + return nil, err + } + + stats.Age = d + case fieldBytes: + bstats, err := parseNFSBytesStats(ss[1:]) + if err != nil { + return nil, err + } + + stats.Bytes = *bstats + case fieldEvents: + estats, err := parseNFSEventsStats(ss[1:]) + if err != nil { + return nil, err + } + + stats.Events = *estats + case fieldTransport: + if len(ss) < 3 { + return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss) + } + + tstats, err := parseNFSTransportStats(ss[2:], statVersion) + if err != nil { + return nil, err + } + + stats.Transport = *tstats + } + + // When encountering "per-operation statistics", we must break this + // loop and parse them separately to ensure we can terminate parsing + // before reaching another device entry; hence why this 'if' statement + // is not just another switch case + if ss[0] == fieldPerOpStats { + break + } + } + + if err := s.Err(); err != nil { + return nil, err + } + + // NFS per-operation stats appear last before the next device entry + perOpStats, err := parseNFSOperationStats(s) + if err != nil { + return nil, err + } + + stats.Operations = perOpStats + + return stats, nil +} + +// parseNFSBytesStats parses a NFSBytesStats line using an input set of +// integer fields. +func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) { + if len(ss) != fieldBytesLen { + return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss) + } + + ns := make([]uint64, 0, fieldBytesLen) + for _, s := range ss { + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + ns = append(ns, n) + } + + return &NFSBytesStats{ + Read: ns[0], + Write: ns[1], + DirectRead: ns[2], + DirectWrite: ns[3], + ReadTotal: ns[4], + WriteTotal: ns[5], + ReadPages: ns[6], + WritePages: ns[7], + }, nil +} + +// parseNFSEventsStats parses a NFSEventsStats line using an input set of +// integer fields. 
+func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) { + if len(ss) != fieldEventsLen { + return nil, fmt.Errorf("invalid NFS events stats: %v", ss) + } + + ns := make([]uint64, 0, fieldEventsLen) + for _, s := range ss { + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + ns = append(ns, n) + } + + return &NFSEventsStats{ + InodeRevalidate: ns[0], + DnodeRevalidate: ns[1], + DataInvalidate: ns[2], + AttributeInvalidate: ns[3], + VFSOpen: ns[4], + VFSLookup: ns[5], + VFSAccess: ns[6], + VFSUpdatePage: ns[7], + VFSReadPage: ns[8], + VFSReadPages: ns[9], + VFSWritePage: ns[10], + VFSWritePages: ns[11], + VFSGetdents: ns[12], + VFSSetattr: ns[13], + VFSFlush: ns[14], + VFSFsync: ns[15], + VFSLock: ns[16], + VFSFileRelease: ns[17], + CongestionWait: ns[18], + Truncation: ns[19], + WriteExtension: ns[20], + SillyRename: ns[21], + ShortRead: ns[22], + ShortWrite: ns[23], + JukeboxDelay: ns[24], + PNFSRead: ns[25], + PNFSWrite: ns[26], + }, nil +} + +// parseNFSOperationStats parses a slice of NFSOperationStats by scanning +// additional information about per-operation statistics until an empty +// line is reached. +func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { + const ( + // Number of expected fields in each per-operation statistics set + numFields = 9 + ) + + var ops []NFSOperationStats + + for s.Scan() { + ss := strings.Fields(string(s.Bytes())) + if len(ss) == 0 { + // Must break when reading a blank line after per-operation stats to + // enable top-level function to parse the next device entry + break + } + + if len(ss) != numFields { + return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss) + } + + // Skip string operation name for integers + ns := make([]uint64, 0, numFields-1) + for _, st := range ss[1:] { + n, err := strconv.ParseUint(st, 10, 64) + if err != nil { + return nil, err + } + + ns = append(ns, n) + } + + ops = append(ops, NFSOperationStats{ + Operation: strings.TrimSuffix(ss[0], ":"), + Requests: ns[0], + Transmissions: ns[1], + MajorTimeouts: ns[2], + BytesSent: ns[3], + BytesReceived: ns[4], + CumulativeQueueTime: time.Duration(ns[5]) * time.Millisecond, + CumulativeTotalResponseTime: time.Duration(ns[6]) * time.Millisecond, + CumulativeTotalRequestTime: time.Duration(ns[7]) * time.Millisecond, + }) + } + + return ops, s.Err() +} + +// parseNFSTransportStats parses a NFSTransportStats line using an input set of +// integer fields matched to a specific stats version. +func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) { + switch statVersion { + case statVersion10: + if len(ss) != fieldTransport10Len { + return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss) + } + case statVersion11: + if len(ss) != fieldTransport11Len { + return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss) + } + default: + return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion) + } + + // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay + // in a v1.0 response. + // + // Note: slice length must be set to length of v1.1 stats to avoid a panic when + // only v1.0 stats are present. + // See: https://github.com/prometheus/node_exporter/issues/571. 
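+	// For a v1.0 line this means only ns[0] through ns[9] are filled in, and
+	// the three v1.1-only fields at the end of the struct remain zero.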
+ ns := make([]uint64, fieldTransport11Len) + for i, s := range ss { + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + ns[i] = n + } + + return &NFSTransportStats{ + Port: ns[0], + Bind: ns[1], + Connect: ns[2], + ConnectIdleTime: ns[3], + IdleTime: time.Duration(ns[4]) * time.Second, + Sends: ns[5], + Receives: ns[6], + BadTransactionIDs: ns[7], + CumulativeActiveRequests: ns[8], + CumulativeBacklog: ns[9], + MaximumRPCSlotsUsed: ns[10], + CumulativeSendingQueue: ns[11], + CumulativePendingQueue: ns[12], + }, nil +} diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go new file mode 100644 index 00000000..f8c184ef --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_dev.go @@ -0,0 +1,203 @@ +package procfs + +import ( + "bufio" + "errors" + "os" + "sort" + "strconv" + "strings" +) + +// NetDevLine is single line parsed from /proc/net/dev or /proc/[pid]/net/dev. +type NetDevLine struct { + Name string `json:"name"` // The name of the interface. + RxBytes uint64 `json:"rx_bytes"` // Cumulative count of bytes received. + RxPackets uint64 `json:"rx_packets"` // Cumulative count of packets received. + RxErrors uint64 `json:"rx_errors"` // Cumulative count of receive errors encountered. + RxDropped uint64 `json:"rx_dropped"` // Cumulative count of packets dropped while receiving. + RxFIFO uint64 `json:"rx_fifo"` // Cumulative count of FIFO buffer errors. + RxFrame uint64 `json:"rx_frame"` // Cumulative count of packet framing errors. + RxCompressed uint64 `json:"rx_compressed"` // Cumulative count of compressed packets received by the device driver. + RxMulticast uint64 `json:"rx_multicast"` // Cumulative count of multicast frames received by the device driver. + TxBytes uint64 `json:"tx_bytes"` // Cumulative count of bytes transmitted. + TxPackets uint64 `json:"tx_packets"` // Cumulative count of packets transmitted. + TxErrors uint64 `json:"tx_errors"` // Cumulative count of transmit errors encountered. + TxDropped uint64 `json:"tx_dropped"` // Cumulative count of packets dropped while transmitting. + TxFIFO uint64 `json:"tx_fifo"` // Cumulative count of FIFO buffer errors. + TxCollisions uint64 `json:"tx_collisions"` // Cumulative count of collisions detected on the interface. + TxCarrier uint64 `json:"tx_carrier"` // Cumulative count of carrier losses detected by the device driver. + TxCompressed uint64 `json:"tx_compressed"` // Cumulative count of compressed packets transmitted by the device driver. +} + +// NetDev is parsed from /proc/net/dev or /proc/[pid]/net/dev. The map keys +// are interface names. +type NetDev map[string]NetDevLine + +// NewNetDev returns kernel/system statistics read from /proc/net/dev. +func NewNetDev() (NetDev, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return nil, err + } + + return fs.NewNetDev() +} + +// NewNetDev returns kernel/system statistics read from /proc/net/dev. +func (fs FS) NewNetDev() (NetDev, error) { + return newNetDev(fs.Path("net/dev")) +} + +// NewNetDev returns kernel/system statistics read from /proc/[pid]/net/dev. +func (p Proc) NewNetDev() (NetDev, error) { + return newNetDev(p.path("net/dev")) +} + +// newNetDev creates a new NetDev from the contents of the given file. +func newNetDev(file string) (NetDev, error) { + f, err := os.Open(file) + if err != nil { + return NetDev{}, err + } + defer f.Close() + + nd := NetDev{} + s := bufio.NewScanner(f) + for n := 0; s.Scan(); n++ { + // Skip the 2 header lines. 
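+		// They look roughly like this:
+		//
+		// Inter-|   Receive                                               |  Transmit
+		//  face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed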
+		if n < 2 {
+			continue
+		}
+
+		line, err := nd.parseLine(s.Text())
+		if err != nil {
+			return nd, err
+		}
+
+		nd[line.Name] = *line
+	}
+
+	return nd, s.Err()
+}
+
+// parseLine parses a single line from the /proc/net/dev file. Header lines
+// must be filtered prior to calling this method.
+func (nd NetDev) parseLine(rawLine string) (*NetDevLine, error) {
+	parts := strings.SplitN(rawLine, ":", 2)
+	if len(parts) != 2 {
+		return nil, errors.New("invalid net/dev line, missing colon")
+	}
+	fields := strings.Fields(strings.TrimSpace(parts[1]))
+	// Guard against truncated lines so the indexed accesses below cannot
+	// panic: a full entry carries 8 RX and 8 TX counters.
+	if len(fields) < 16 {
+		return nil, errors.New("invalid net/dev line, missing fields")
+	}
+
+	var err error
+	line := &NetDevLine{}
+
+	// Interface Name
+	line.Name = strings.TrimSpace(parts[0])
+	if line.Name == "" {
+		return nil, errors.New("invalid net/dev line, empty interface name")
+	}
+
+	// RX
+	line.RxBytes, err = strconv.ParseUint(fields[0], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxPackets, err = strconv.ParseUint(fields[1], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxErrors, err = strconv.ParseUint(fields[2], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxDropped, err = strconv.ParseUint(fields[3], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxFIFO, err = strconv.ParseUint(fields[4], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxFrame, err = strconv.ParseUint(fields[5], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxCompressed, err = strconv.ParseUint(fields[6], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxMulticast, err = strconv.ParseUint(fields[7], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+
+	// TX
+	line.TxBytes, err = strconv.ParseUint(fields[8], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxPackets, err = strconv.ParseUint(fields[9], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxErrors, err = strconv.ParseUint(fields[10], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxDropped, err = strconv.ParseUint(fields[11], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxFIFO, err = strconv.ParseUint(fields[12], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxCollisions, err = strconv.ParseUint(fields[13], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxCarrier, err = strconv.ParseUint(fields[14], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxCompressed, err = strconv.ParseUint(fields[15], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+
+	return line, nil
+}
+
+// Total aggregates the values across interfaces and returns a new NetDevLine.
+// The Name field will be a sorted, comma-separated list of interface names.
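+//
+// Illustrative use (error handling elided):
+//
+//	nd, _ := NewNetDev()
+//	t := nd.Total()
+//	fmt.Printf("%s: rx=%d tx=%d bytes\n", t.Name, t.RxBytes, t.TxBytes)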
+func (nd NetDev) Total() NetDevLine {
+	total := NetDevLine{}
+
+	names := make([]string, 0, len(nd))
+	for _, ifc := range nd {
+		names = append(names, ifc.Name)
+		total.RxBytes += ifc.RxBytes
+		total.RxPackets += ifc.RxPackets
+		total.RxErrors += ifc.RxErrors
+		total.RxDropped += ifc.RxDropped
+		total.RxFIFO += ifc.RxFIFO
+		total.RxFrame += ifc.RxFrame
+		total.RxCompressed += ifc.RxCompressed
+		total.RxMulticast += ifc.RxMulticast
+		total.TxBytes += ifc.TxBytes
+		total.TxPackets += ifc.TxPackets
+		total.TxErrors += ifc.TxErrors
+		total.TxDropped += ifc.TxDropped
+		total.TxFIFO += ifc.TxFIFO
+		total.TxCollisions += ifc.TxCollisions
+		total.TxCarrier += ifc.TxCarrier
+		total.TxCompressed += ifc.TxCompressed
+	}
+	sort.Strings(names)
+	total.Name = strings.Join(names, ", ")
+
+	return total
+}
diff --git a/vendor/github.com/prometheus/procfs/nfs/nfs.go b/vendor/github.com/prometheus/procfs/nfs/nfs.go
new file mode 100644
index 00000000..e2185b78
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/nfs/nfs.go
@@ -0,0 +1,263 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package nfs implements parsing of /proc/net/rpc/nfs and /proc/net/rpc/nfsd.
+// Fields are documented in https://www.svennd.be/nfsd-stats-explained-procnetrpcnfsd/
+package nfs
+
+// ReplyCache models the "rc" line.
+type ReplyCache struct {
+	Hits    uint64
+	Misses  uint64
+	NoCache uint64
+}
+
+// FileHandles models the "fh" line.
+type FileHandles struct {
+	Stale        uint64
+	TotalLookups uint64
+	AnonLookups  uint64
+	DirNoCache   uint64
+	NoDirNoCache uint64
+}
+
+// InputOutput models the "io" line.
+type InputOutput struct {
+	Read  uint64
+	Write uint64
+}
+
+// Threads models the "th" line.
+type Threads struct {
+	Threads uint64
+	FullCnt uint64
+}
+
+// ReadAheadCache models the "ra" line.
+type ReadAheadCache struct {
+	CacheSize      uint64
+	CacheHistogram []uint64
+	NotFound       uint64
+}
+
+// Network models the "net" line.
+type Network struct {
+	NetCount   uint64
+	UDPCount   uint64
+	TCPCount   uint64
+	TCPConnect uint64
+}
+
+// ClientRPC models the nfs "rpc" line.
+type ClientRPC struct {
+	RPCCount        uint64
+	Retransmissions uint64
+	AuthRefreshes   uint64
+}
+
+// ServerRPC models the nfsd "rpc" line.
+type ServerRPC struct {
+	RPCCount uint64
+	BadCnt   uint64
+	BadFmt   uint64
+	BadAuth  uint64
+	BadcInt  uint64
+}
+
+// V2Stats models the "proc2" line.
+type V2Stats struct {
+	Null     uint64
+	GetAttr  uint64
+	SetAttr  uint64
+	Root     uint64
+	Lookup   uint64
+	ReadLink uint64
+	Read     uint64
+	WrCache  uint64
+	Write    uint64
+	Create   uint64
+	Remove   uint64
+	Rename   uint64
+	Link     uint64
+	SymLink  uint64
+	MkDir    uint64
+	RmDir    uint64
+	ReadDir  uint64
+	FsStat   uint64
+}
+
+// V3Stats models the "proc3" line.
+type V3Stats struct { + Null uint64 + GetAttr uint64 + SetAttr uint64 + Lookup uint64 + Access uint64 + ReadLink uint64 + Read uint64 + Write uint64 + Create uint64 + MkDir uint64 + SymLink uint64 + MkNod uint64 + Remove uint64 + RmDir uint64 + Rename uint64 + Link uint64 + ReadDir uint64 + ReadDirPlus uint64 + FsStat uint64 + FsInfo uint64 + PathConf uint64 + Commit uint64 +} + +// ClientV4Stats models the nfs "proc4" line. +type ClientV4Stats struct { + Null uint64 + Read uint64 + Write uint64 + Commit uint64 + Open uint64 + OpenConfirm uint64 + OpenNoattr uint64 + OpenDowngrade uint64 + Close uint64 + Setattr uint64 + FsInfo uint64 + Renew uint64 + SetClientId uint64 + SetClientIdConfirm uint64 + Lock uint64 + Lockt uint64 + Locku uint64 + Access uint64 + Getattr uint64 + Lookup uint64 + LookupRoot uint64 + Remove uint64 + Rename uint64 + Link uint64 + Symlink uint64 + Create uint64 + Pathconf uint64 + StatFs uint64 + ReadLink uint64 + ReadDir uint64 + ServerCaps uint64 + DelegReturn uint64 + GetAcl uint64 + SetAcl uint64 + FsLocations uint64 + ReleaseLockowner uint64 + Secinfo uint64 + FsidPresent uint64 + ExchangeId uint64 + CreateSession uint64 + DestroySession uint64 + Sequence uint64 + GetLeaseTime uint64 + ReclaimComplete uint64 + LayoutGet uint64 + GetDeviceInfo uint64 + LayoutCommit uint64 + LayoutReturn uint64 + SecinfoNoName uint64 + TestStateId uint64 + FreeStateId uint64 + GetDeviceList uint64 + BindConnToSession uint64 + DestroyClientId uint64 + Seek uint64 + Allocate uint64 + DeAllocate uint64 + LayoutStats uint64 + Clone uint64 +} + +// ServerV4Stats models the nfsd "proc4" line. +type ServerV4Stats struct { + Null uint64 + Compound uint64 +} + +// V4Ops models the "proc4ops" line: NFSv4 operations +// Variable list, see: +// v4.0 https://tools.ietf.org/html/rfc3010 (38 operations) +// v4.1 https://tools.ietf.org/html/rfc5661 (58 operations) +// v4.2 https://tools.ietf.org/html/draft-ietf-nfsv4-minorversion2-41 (71 operations) +type V4Ops struct { + //Values uint64 // Variable depending on v4.x sub-version. TODO: Will this always at least include the fields in this struct? + Op0Unused uint64 + Op1Unused uint64 + Op2Future uint64 + Access uint64 + Close uint64 + Commit uint64 + Create uint64 + DelegPurge uint64 + DelegReturn uint64 + GetAttr uint64 + GetFH uint64 + Link uint64 + Lock uint64 + Lockt uint64 + Locku uint64 + Lookup uint64 + LookupRoot uint64 + Nverify uint64 + Open uint64 + OpenAttr uint64 + OpenConfirm uint64 + OpenDgrd uint64 + PutFH uint64 + PutPubFH uint64 + PutRootFH uint64 + Read uint64 + ReadDir uint64 + ReadLink uint64 + Remove uint64 + Rename uint64 + Renew uint64 + RestoreFH uint64 + SaveFH uint64 + SecInfo uint64 + SetAttr uint64 + Verify uint64 + Write uint64 + RelLockOwner uint64 +} + +// RPCStats models all stats from /proc/net/rpc/nfs. +type ClientRPCStats struct { + Network Network + ClientRPC ClientRPC + V2Stats V2Stats + V3Stats V3Stats + ClientV4Stats ClientV4Stats +} + +// ServerRPCStats models all stats from /proc/net/rpc/nfsd. 
+type ServerRPCStats struct { + ReplyCache ReplyCache + FileHandles FileHandles + InputOutput InputOutput + Threads Threads + ReadAheadCache ReadAheadCache + Network Network + ServerRPC ServerRPC + V2Stats V2Stats + V3Stats V3Stats + ServerV4Stats ServerV4Stats + V4Ops V4Ops +} diff --git a/vendor/github.com/prometheus/procfs/nfs/parse.go b/vendor/github.com/prometheus/procfs/nfs/parse.go new file mode 100644 index 00000000..3aa32563 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/nfs/parse.go @@ -0,0 +1,308 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nfs + +import ( + "fmt" +) + +func parseReplyCache(v []uint64) (ReplyCache, error) { + if len(v) != 3 { + return ReplyCache{}, fmt.Errorf("invalid ReplyCache line %q", v) + } + + return ReplyCache{ + Hits: v[0], + Misses: v[1], + NoCache: v[2], + }, nil +} + +func parseFileHandles(v []uint64) (FileHandles, error) { + if len(v) != 5 { + return FileHandles{}, fmt.Errorf("invalid FileHandles, line %q", v) + } + + return FileHandles{ + Stale: v[0], + TotalLookups: v[1], + AnonLookups: v[2], + DirNoCache: v[3], + NoDirNoCache: v[4], + }, nil +} + +func parseInputOutput(v []uint64) (InputOutput, error) { + if len(v) != 2 { + return InputOutput{}, fmt.Errorf("invalid InputOutput line %q", v) + } + + return InputOutput{ + Read: v[0], + Write: v[1], + }, nil +} + +func parseThreads(v []uint64) (Threads, error) { + if len(v) != 2 { + return Threads{}, fmt.Errorf("invalid Threads line %q", v) + } + + return Threads{ + Threads: v[0], + FullCnt: v[1], + }, nil +} + +func parseReadAheadCache(v []uint64) (ReadAheadCache, error) { + if len(v) != 12 { + return ReadAheadCache{}, fmt.Errorf("invalid ReadAheadCache line %q", v) + } + + return ReadAheadCache{ + CacheSize: v[0], + CacheHistogram: v[1:11], + NotFound: v[11], + }, nil +} + +func parseNetwork(v []uint64) (Network, error) { + if len(v) != 4 { + return Network{}, fmt.Errorf("invalid Network line %q", v) + } + + return Network{ + NetCount: v[0], + UDPCount: v[1], + TCPCount: v[2], + TCPConnect: v[3], + }, nil +} + +func parseServerRPC(v []uint64) (ServerRPC, error) { + if len(v) != 5 { + return ServerRPC{}, fmt.Errorf("invalid RPC line %q", v) + } + + return ServerRPC{ + RPCCount: v[0], + BadCnt: v[1], + BadFmt: v[2], + BadAuth: v[3], + BadcInt: v[4], + }, nil +} + +func parseClientRPC(v []uint64) (ClientRPC, error) { + if len(v) != 3 { + return ClientRPC{}, fmt.Errorf("invalid RPC line %q", v) + } + + return ClientRPC{ + RPCCount: v[0], + Retransmissions: v[1], + AuthRefreshes: v[2], + }, nil +} + +func parseV2Stats(v []uint64) (V2Stats, error) { + values := int(v[0]) + if len(v[1:]) != values || values != 18 { + return V2Stats{}, fmt.Errorf("invalid V2Stats line %q", v) + } + + return V2Stats{ + Null: v[1], + GetAttr: v[2], + SetAttr: v[3], + Root: v[4], + Lookup: v[5], + ReadLink: v[6], + Read: v[7], + WrCache: v[8], + Write: v[9], + Create: v[10], + Remove: v[11], + Rename: v[12], + Link: v[13], + SymLink: v[14], + MkDir: v[15], 
+ RmDir: v[16], + ReadDir: v[17], + FsStat: v[18], + }, nil +} + +func parseV3Stats(v []uint64) (V3Stats, error) { + values := int(v[0]) + if len(v[1:]) != values || values != 22 { + return V3Stats{}, fmt.Errorf("invalid V3Stats line %q", v) + } + + return V3Stats{ + Null: v[1], + GetAttr: v[2], + SetAttr: v[3], + Lookup: v[4], + Access: v[5], + ReadLink: v[6], + Read: v[7], + Write: v[8], + Create: v[9], + MkDir: v[10], + SymLink: v[11], + MkNod: v[12], + Remove: v[13], + RmDir: v[14], + Rename: v[15], + Link: v[16], + ReadDir: v[17], + ReadDirPlus: v[18], + FsStat: v[19], + FsInfo: v[20], + PathConf: v[21], + Commit: v[22], + }, nil +} + +func parseClientV4Stats(v []uint64) (ClientV4Stats, error) { + values := int(v[0]) + if len(v[1:]) != values || values < 59 { + return ClientV4Stats{}, fmt.Errorf("invalid V4Stats line %q", v) + } + + return ClientV4Stats{ + Null: v[1], + Read: v[2], + Write: v[3], + Commit: v[4], + Open: v[5], + OpenConfirm: v[6], + OpenNoattr: v[7], + OpenDowngrade: v[8], + Close: v[9], + Setattr: v[10], + FsInfo: v[11], + Renew: v[12], + SetClientId: v[13], + SetClientIdConfirm: v[14], + Lock: v[15], + Lockt: v[16], + Locku: v[17], + Access: v[18], + Getattr: v[19], + Lookup: v[20], + LookupRoot: v[21], + Remove: v[22], + Rename: v[23], + Link: v[24], + Symlink: v[25], + Create: v[26], + Pathconf: v[27], + StatFs: v[28], + ReadLink: v[29], + ReadDir: v[30], + ServerCaps: v[31], + DelegReturn: v[32], + GetAcl: v[33], + SetAcl: v[34], + FsLocations: v[35], + ReleaseLockowner: v[36], + Secinfo: v[37], + FsidPresent: v[38], + ExchangeId: v[39], + CreateSession: v[40], + DestroySession: v[41], + Sequence: v[42], + GetLeaseTime: v[43], + ReclaimComplete: v[44], + LayoutGet: v[45], + GetDeviceInfo: v[46], + LayoutCommit: v[47], + LayoutReturn: v[48], + SecinfoNoName: v[49], + TestStateId: v[50], + FreeStateId: v[51], + GetDeviceList: v[52], + BindConnToSession: v[53], + DestroyClientId: v[54], + Seek: v[55], + Allocate: v[56], + DeAllocate: v[57], + LayoutStats: v[58], + Clone: v[59], + }, nil +} + +func parseServerV4Stats(v []uint64) (ServerV4Stats, error) { + values := int(v[0]) + if len(v[1:]) != values || values != 2 { + return ServerV4Stats{}, fmt.Errorf("invalid V4Stats line %q", v) + } + + return ServerV4Stats{ + Null: v[1], + Compound: v[2], + }, nil +} + +func parseV4Ops(v []uint64) (V4Ops, error) { + values := int(v[0]) + if len(v[1:]) != values || values < 39 { + return V4Ops{}, fmt.Errorf("invalid V4Ops line %q", v) + } + + stats := V4Ops{ + Op0Unused: v[1], + Op1Unused: v[2], + Op2Future: v[3], + Access: v[4], + Close: v[5], + Commit: v[6], + Create: v[7], + DelegPurge: v[8], + DelegReturn: v[9], + GetAttr: v[10], + GetFH: v[11], + Link: v[12], + Lock: v[13], + Lockt: v[14], + Locku: v[15], + Lookup: v[16], + LookupRoot: v[17], + Nverify: v[18], + Open: v[19], + OpenAttr: v[20], + OpenConfirm: v[21], + OpenDgrd: v[22], + PutFH: v[23], + PutPubFH: v[24], + PutRootFH: v[25], + Read: v[26], + ReadDir: v[27], + ReadLink: v[28], + Remove: v[29], + Rename: v[30], + Renew: v[31], + RestoreFH: v[32], + SaveFH: v[33], + SecInfo: v[34], + SetAttr: v[35], + Verify: v[36], + Write: v[37], + RelLockOwner: v[38], + } + + return stats, nil +} diff --git a/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go b/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go new file mode 100644 index 00000000..b5c0b15f --- /dev/null +++ b/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go @@ -0,0 +1,67 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, 
Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nfs + +import ( + "bufio" + "fmt" + "io" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ParseClientRPCStats returns stats read from /proc/net/rpc/nfs +func ParseClientRPCStats(r io.Reader) (*ClientRPCStats, error) { + stats := &ClientRPCStats{} + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + line := scanner.Text() + parts := strings.Fields(scanner.Text()) + // require at least + if len(parts) < 2 { + return nil, fmt.Errorf("invalid NFSd metric line %q", line) + } + + values, err := util.ParseUint64s(parts[1:]) + if err != nil { + return nil, fmt.Errorf("error parsing NFSd metric line: %s", err) + } + + switch metricLine := parts[0]; metricLine { + case "net": + stats.Network, err = parseNetwork(values) + case "rpc": + stats.ClientRPC, err = parseClientRPC(values) + case "proc2": + stats.V2Stats, err = parseV2Stats(values) + case "proc3": + stats.V3Stats, err = parseV3Stats(values) + case "proc4": + stats.ClientV4Stats, err = parseClientV4Stats(values) + default: + return nil, fmt.Errorf("unknown NFSd metric line %q", metricLine) + } + if err != nil { + return nil, fmt.Errorf("errors parsing NFSd metric line: %s", err) + } + } + + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("error scanning NFSd file: %s", err) + } + + return stats, nil +} diff --git a/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go b/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go new file mode 100644 index 00000000..57bb4a35 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go @@ -0,0 +1,89 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package nfs + +import ( + "bufio" + "fmt" + "io" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ParseServerRPCStats returns stats read from /proc/net/rpc/nfsd +func ParseServerRPCStats(r io.Reader) (*ServerRPCStats, error) { + stats := &ServerRPCStats{} + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + line := scanner.Text() + parts := strings.Fields(scanner.Text()) + // require at least + if len(parts) < 2 { + return nil, fmt.Errorf("invalid NFSd metric line %q", line) + } + label := parts[0] + + var values []uint64 + var err error + if label == "th" { + if len(parts) < 3 { + return nil, fmt.Errorf("invalid NFSd th metric line %q", line) + } + values, err = util.ParseUint64s(parts[1:3]) + } else { + values, err = util.ParseUint64s(parts[1:]) + } + if err != nil { + return nil, fmt.Errorf("error parsing NFSd metric line: %s", err) + } + + switch metricLine := parts[0]; metricLine { + case "rc": + stats.ReplyCache, err = parseReplyCache(values) + case "fh": + stats.FileHandles, err = parseFileHandles(values) + case "io": + stats.InputOutput, err = parseInputOutput(values) + case "th": + stats.Threads, err = parseThreads(values) + case "ra": + stats.ReadAheadCache, err = parseReadAheadCache(values) + case "net": + stats.Network, err = parseNetwork(values) + case "rpc": + stats.ServerRPC, err = parseServerRPC(values) + case "proc2": + stats.V2Stats, err = parseV2Stats(values) + case "proc3": + stats.V3Stats, err = parseV3Stats(values) + case "proc4": + stats.ServerV4Stats, err = parseServerV4Stats(values) + case "proc4ops": + stats.V4Ops, err = parseV4Ops(values) + default: + return nil, fmt.Errorf("unknown NFSd metric line %q", metricLine) + } + if err != nil { + return nil, fmt.Errorf("errors parsing NFSd metric line: %s", err) + } + } + + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("error scanning NFSd file: %s", err) + } + + return stats, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go new file mode 100644 index 00000000..8717e1fe --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -0,0 +1,224 @@ +package procfs + +import ( + "fmt" + "io/ioutil" + "os" + "strconv" + "strings" +) + +// Proc provides information about a running process. +type Proc struct { + // The process ID. + PID int + + fs FS +} + +// Procs represents a list of Proc structs. +type Procs []Proc + +func (p Procs) Len() int { return len(p) } +func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID } + +// Self returns a process for the current process read via /proc/self. +func Self() (Proc, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Proc{}, err + } + return fs.Self() +} + +// NewProc returns a process for the given pid under /proc. +func NewProc(pid int) (Proc, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Proc{}, err + } + return fs.NewProc(pid) +} + +// AllProcs returns a list of all currently available processes under /proc. +func AllProcs() (Procs, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Procs{}, err + } + return fs.AllProcs() +} + +// Self returns a process for the current process. 
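+//
+// Illustrative use, assuming the default /proc mount point and with error
+// handling elided:
+//
+//	fs, _ := NewFS(DefaultMountPoint)
+//	p, _ := fs.Self()
+//	comm, _ := p.Comm()
+//	fmt.Println("running as:", comm)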
+func (fs FS) Self() (Proc, error) { + p, err := os.Readlink(fs.Path("self")) + if err != nil { + return Proc{}, err + } + pid, err := strconv.Atoi(strings.Replace(p, string(fs), "", -1)) + if err != nil { + return Proc{}, err + } + return fs.NewProc(pid) +} + +// NewProc returns a process for the given pid. +func (fs FS) NewProc(pid int) (Proc, error) { + if _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil { + return Proc{}, err + } + return Proc{PID: pid, fs: fs}, nil +} + +// AllProcs returns a list of all currently available processes. +func (fs FS) AllProcs() (Procs, error) { + d, err := os.Open(fs.Path()) + if err != nil { + return Procs{}, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err) + } + + p := Procs{} + for _, n := range names { + pid, err := strconv.ParseInt(n, 10, 64) + if err != nil { + continue + } + p = append(p, Proc{PID: int(pid), fs: fs}) + } + + return p, nil +} + +// CmdLine returns the command line of a process. +func (p Proc) CmdLine() ([]string, error) { + f, err := os.Open(p.path("cmdline")) + if err != nil { + return nil, err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return nil, err + } + + if len(data) < 1 { + return []string{}, nil + } + + return strings.Split(string(data[:len(data)-1]), string(byte(0))), nil +} + +// Comm returns the command name of a process. +func (p Proc) Comm() (string, error) { + f, err := os.Open(p.path("comm")) + if err != nil { + return "", err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return "", err + } + + return strings.TrimSpace(string(data)), nil +} + +// Executable returns the absolute path of the executable command of a process. +func (p Proc) Executable() (string, error) { + exe, err := os.Readlink(p.path("exe")) + if os.IsNotExist(err) { + return "", nil + } + + return exe, err +} + +// FileDescriptors returns the currently open file descriptors of a process. +func (p Proc) FileDescriptors() ([]uintptr, error) { + names, err := p.fileDescriptors() + if err != nil { + return nil, err + } + + fds := make([]uintptr, len(names)) + for i, n := range names { + fd, err := strconv.ParseInt(n, 10, 32) + if err != nil { + return nil, fmt.Errorf("could not parse fd %s: %s", n, err) + } + fds[i] = uintptr(fd) + } + + return fds, nil +} + +// FileDescriptorTargets returns the targets of all file descriptors of a process. +// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string. +func (p Proc) FileDescriptorTargets() ([]string, error) { + names, err := p.fileDescriptors() + if err != nil { + return nil, err + } + + targets := make([]string, len(names)) + + for i, name := range names { + target, err := os.Readlink(p.path("fd", name)) + if err == nil { + targets[i] = target + } + } + + return targets, nil +} + +// FileDescriptorsLen returns the number of currently open file descriptors of +// a process. +func (p Proc) FileDescriptorsLen() (int, error) { + fds, err := p.fileDescriptors() + if err != nil { + return 0, err + } + + return len(fds), nil +} + +// MountStats retrieves statistics and configuration for mount points in a +// process's namespace. 
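+//
+// Given a Proc p, an illustrative use looks like:
+//
+//	mounts, _ := p.MountStats()
+//	for _, m := range mounts {
+//		if nfs, ok := m.Stats.(*MountStatsNFS); ok {
+//			fmt.Println(m.Mount, nfs.Age)
+//		}
+//	}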
+func (p Proc) MountStats() ([]*Mount, error) {
+	f, err := os.Open(p.path("mountstats"))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	return parseMountStats(f)
+}
+
+func (p Proc) fileDescriptors() ([]string, error) {
+	d, err := os.Open(p.path("fd"))
+	if err != nil {
+		return nil, err
+	}
+	defer d.Close()
+
+	names, err := d.Readdirnames(-1)
+	if err != nil {
+		return nil, fmt.Errorf("could not read %s: %s", d.Name(), err)
+	}
+
+	return names, nil
+}
+
+func (p Proc) path(pa ...string) string {
+	return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go
new file mode 100644
index 00000000..e7f6674d
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_io.go
@@ -0,0 +1,52 @@
+package procfs
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+)
+
+// ProcIO models the content of /proc/[pid]/io.
+type ProcIO struct {
+	// Chars read.
+	RChar uint64
+	// Chars written.
+	WChar uint64
+	// Read syscalls.
+	SyscR uint64
+	// Write syscalls.
+	SyscW uint64
+	// Bytes read.
+	ReadBytes uint64
+	// Bytes written.
+	WriteBytes uint64
+	// Bytes written, but taking into account truncation. See
+	// Documentation/filesystems/proc.txt in the kernel sources for
+	// detailed explanation.
+	CancelledWriteBytes int64
+}
+
+// NewIO creates a new ProcIO instance from a given Proc instance.
+func (p Proc) NewIO() (ProcIO, error) {
+	pio := ProcIO{}
+
+	f, err := os.Open(p.path("io"))
+	if err != nil {
+		return pio, err
+	}
+	defer f.Close()
+
+	data, err := ioutil.ReadAll(f)
+	if err != nil {
+		return pio, err
+	}
+
+	ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" +
+		"read_bytes: %d\nwrite_bytes: %d\n" +
+		"cancelled_write_bytes: %d\n"
+
+	_, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR,
+		&pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes)
+
+	return pio, err
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go
new file mode 100644
index 00000000..b684a5b5
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_limits.go
@@ -0,0 +1,137 @@
+package procfs
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"regexp"
+	"strconv"
+)
+
+// ProcLimits represents the soft limits for each of the process's resource
+// limits. For more information see getrlimit(2):
+// http://man7.org/linux/man-pages/man2/getrlimit.2.html.
+type ProcLimits struct {
+	// CPU time limit in seconds.
+	CPUTime int64
+	// Maximum size of files that the process may create.
+	FileSize int64
+	// Maximum size of the process's data segment (initialized data,
+	// uninitialized data, and heap).
+	DataSize int64
+	// Maximum size of the process stack in bytes.
+	StackSize int64
+	// Maximum size of a core file.
+	CoreFileSize int64
+	// Limit of the process's resident set in pages.
+	ResidentSet int64
+	// Maximum number of processes that can be created for the real user ID of
+	// the calling process.
+	Processes int64
+	// Value one greater than the maximum file descriptor number that can be
+	// opened by this process.
+	OpenFiles int64
+	// Maximum number of bytes of memory that may be locked into RAM.
+	LockedMemory int64
+	// Maximum size of the process's virtual memory address space in bytes.
+	AddressSpace int64
+	// Limit on the combined number of flock(2) locks and fcntl(2) leases that
+	// this process may establish.
+	FileLocks int64
+	// Limit of signals that may be queued for the real user ID of the calling
+	// process.
+	PendingSignals int64
+	// Limit on the number of bytes that can be allocated for POSIX message
+	// queues for the real user ID of the calling process.
+	MsqqueueSize int64
+	// Limit of the nice priority set using setpriority(2) or nice(2).
+	NicePriority int64
+	// Limit of the real-time priority set using sched_setscheduler(2) or
+	// sched_setparam(2).
+	RealtimePriority int64
+	// Limit (in microseconds) on the amount of CPU time that a process
+	// scheduled under a real-time scheduling policy may consume without making
+	// a blocking system call.
+	RealtimeTimeout int64
+}
+
+const (
+	limitsFields    = 3
+	limitsUnlimited = "unlimited"
+)
+
+var (
+	limitsDelimiter = regexp.MustCompile(" +")
+)
+
+// NewLimits returns the current soft limits of the process.
+func (p Proc) NewLimits() (ProcLimits, error) {
+	f, err := os.Open(p.path("limits"))
+	if err != nil {
+		return ProcLimits{}, err
+	}
+	defer f.Close()
+
+	var (
+		l = ProcLimits{}
+		s = bufio.NewScanner(f)
+	)
+	for s.Scan() {
+		fields := limitsDelimiter.Split(s.Text(), limitsFields)
+		if len(fields) != limitsFields {
+			return ProcLimits{}, fmt.Errorf(
+				"couldn't parse %s line %s", f.Name(), s.Text())
+		}
+
+		switch fields[0] {
+		case "Max cpu time":
+			l.CPUTime, err = parseInt(fields[1])
+		case "Max file size":
+			l.FileSize, err = parseInt(fields[1])
+		case "Max data size":
+			l.DataSize, err = parseInt(fields[1])
+		case "Max stack size":
+			l.StackSize, err = parseInt(fields[1])
+		case "Max core file size":
+			l.CoreFileSize, err = parseInt(fields[1])
+		case "Max resident set":
+			l.ResidentSet, err = parseInt(fields[1])
+		case "Max processes":
+			l.Processes, err = parseInt(fields[1])
+		case "Max open files":
+			l.OpenFiles, err = parseInt(fields[1])
+		case "Max locked memory":
+			l.LockedMemory, err = parseInt(fields[1])
+		case "Max address space":
+			l.AddressSpace, err = parseInt(fields[1])
+		case "Max file locks":
+			l.FileLocks, err = parseInt(fields[1])
+		case "Max pending signals":
+			l.PendingSignals, err = parseInt(fields[1])
+		case "Max msgqueue size":
+			l.MsqqueueSize, err = parseInt(fields[1])
+		case "Max nice priority":
+			l.NicePriority, err = parseInt(fields[1])
+		case "Max realtime priority":
+			l.RealtimePriority, err = parseInt(fields[1])
+		case "Max realtime timeout":
+			l.RealtimeTimeout, err = parseInt(fields[1])
+		}
+		if err != nil {
+			return ProcLimits{}, err
+		}
+	}
+
+	return l, s.Err()
+}
+
+func parseInt(s string) (int64, error) {
+	if s == limitsUnlimited {
+		return -1, nil
+	}
+	i, err := strconv.ParseInt(s, 10, 64)
+	if err != nil {
+		return 0, fmt.Errorf("couldn't parse value %s: %s", s, err)
+	}
+	return i, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go
new file mode 100644
index 00000000..befdd269
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_ns.go
@@ -0,0 +1,55 @@
+package procfs
+
+import (
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// Namespace represents a single namespace of a process.
+type Namespace struct {
+	Type  string // Namespace type.
+	Inode uint32 // Inode number of the namespace. If two processes are in the same namespace their inodes will match.
+}
+
+// Namespaces contains all of the namespaces that the process is contained in.
+type Namespaces map[string]Namespace
+
+// NewNamespaces reads from /proc/[pid]/ns/* to get the namespaces of which the
+// process is a member.
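+//
+// Given a Proc p, an illustrative use looks like:
+//
+//	ns, _ := p.NewNamespaces()
+//	if net, ok := ns["net"]; ok {
+//		fmt.Println("net namespace inode:", net.Inode)
+//	}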
+func (p Proc) NewNamespaces() (Namespaces, error) { + d, err := os.Open(p.path("ns")) + if err != nil { + return nil, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return nil, fmt.Errorf("failed to read contents of ns dir: %v", err) + } + + ns := make(Namespaces, len(names)) + for _, name := range names { + target, err := os.Readlink(p.path("ns", name)) + if err != nil { + return nil, err + } + + fields := strings.SplitN(target, ":", 2) + if len(fields) != 2 { + return nil, fmt.Errorf("failed to parse namespace type and inode from '%v'", target) + } + + typ := fields[0] + inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32) + if err != nil { + return nil, fmt.Errorf("failed to parse inode from '%v': %v", fields[1], err) + } + + ns[name] = Namespace{typ, uint32(inode)} + } + + return ns, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go new file mode 100644 index 00000000..724e271b --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -0,0 +1,175 @@ +package procfs + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" +) + +// Originally, this USER_HZ value was dynamically retrieved via a sysconf call +// which required cgo. However, that caused a lot of problems regarding +// cross-compilation. Alternatives such as running a binary to determine the +// value, or trying to derive it in some other way were all problematic. After +// much research it was determined that USER_HZ is actually hardcoded to 100 on +// all Go-supported platforms as of the time of this writing. This is why we +// decided to hardcode it here as well. It is not impossible that there could +// be systems with exceptions, but they should be very exotic edge cases, and +// in that case, the worst outcome will be two misreported metrics. +// +// See also the following discussions: +// +// - https://github.com/prometheus/node_exporter/issues/52 +// - https://github.com/prometheus/procfs/pull/2 +// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue +const userHZ = 100 + +// ProcStat provides status information about the process, +// read from /proc/[pid]/stat. +type ProcStat struct { + // The process ID. + PID int + // The filename of the executable. + Comm string + // The process state. + State string + // The PID of the parent of this process. + PPID int + // The process group ID of the process. + PGRP int + // The session ID of the process. + Session int + // The controlling terminal of the process. + TTY int + // The ID of the foreground process group of the controlling terminal of + // the process. + TPGID int + // The kernel flags word of the process. + Flags uint + // The number of minor faults the process has made which have not required + // loading a memory page from disk. + MinFlt uint + // The number of minor faults that the process's waited-for children have + // made. + CMinFlt uint + // The number of major faults the process has made which have required + // loading a memory page from disk. + MajFlt uint + // The number of major faults that the process's waited-for children have + // made. + CMajFlt uint + // Amount of time that this process has been scheduled in user mode, + // measured in clock ticks. + UTime uint + // Amount of time that this process has been scheduled in kernel mode, + // measured in clock ticks. 
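+	// Divide by userHZ to convert to seconds; the CPUTime method below does
+	// exactly that.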
+ STime uint + // Amount of time that this process's waited-for children have been + // scheduled in user mode, measured in clock ticks. + CUTime uint + // Amount of time that this process's waited-for children have been + // scheduled in kernel mode, measured in clock ticks. + CSTime uint + // For processes running a real-time scheduling policy, this is the negated + // scheduling priority, minus one. + Priority int + // The nice value, a value in the range 19 (low priority) to -20 (high + // priority). + Nice int + // Number of threads in this process. + NumThreads int + // The time the process started after system boot, the value is expressed + // in clock ticks. + Starttime uint64 + // Virtual memory size in bytes. + VSize int + // Resident set size in pages. + RSS int + + fs FS +} + +// NewStat returns the current status information of the process. +func (p Proc) NewStat() (ProcStat, error) { + f, err := os.Open(p.path("stat")) + if err != nil { + return ProcStat{}, err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return ProcStat{}, err + } + + var ( + ignore int + + s = ProcStat{PID: p.PID, fs: p.fs} + l = bytes.Index(data, []byte("(")) + r = bytes.LastIndex(data, []byte(")")) + ) + + if l < 0 || r < 0 { + return ProcStat{}, fmt.Errorf( + "unexpected format, couldn't extract comm: %s", + data, + ) + } + + s.Comm = string(data[l+1 : r]) + _, err = fmt.Fscan( + bytes.NewBuffer(data[r+2:]), + &s.State, + &s.PPID, + &s.PGRP, + &s.Session, + &s.TTY, + &s.TPGID, + &s.Flags, + &s.MinFlt, + &s.CMinFlt, + &s.MajFlt, + &s.CMajFlt, + &s.UTime, + &s.STime, + &s.CUTime, + &s.CSTime, + &s.Priority, + &s.Nice, + &s.NumThreads, + &ignore, + &s.Starttime, + &s.VSize, + &s.RSS, + ) + if err != nil { + return ProcStat{}, err + } + + return s, nil +} + +// VirtualMemory returns the virtual memory size in bytes. +func (s ProcStat) VirtualMemory() int { + return s.VSize +} + +// ResidentMemory returns the resident memory size in bytes. +func (s ProcStat) ResidentMemory() int { + return s.RSS * os.Getpagesize() +} + +// StartTime returns the unix timestamp of the process in seconds. +func (s ProcStat) StartTime() (float64, error) { + stat, err := s.fs.NewStat() + if err != nil { + return 0, err + } + return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil +} + +// CPUTime returns the total CPU user and system time in seconds. +func (s ProcStat) CPUTime() float64 { + return float64(s.UTime+s.STime) / userHZ +} diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go new file mode 100644 index 00000000..701f4df6 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/stat.go @@ -0,0 +1,219 @@ +package procfs + +import ( + "bufio" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +// CPUStat shows how much time the cpu spend in various stages. +type CPUStat struct { + User float64 + Nice float64 + System float64 + Idle float64 + Iowait float64 + IRQ float64 + SoftIRQ float64 + Steal float64 + Guest float64 + GuestNice float64 +} + +// SoftIRQStat represent the softirq statistics as exported in the procfs stat file. 
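+// The "softirq" line of /proc/stat carries a grand total followed by one
+// counter per softirq type, in the field order below (see parseSoftIRQStat).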
+// A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html +// It is possible to get per-cpu stats by reading /proc/softirqs +type SoftIRQStat struct { + Hi uint64 + Timer uint64 + NetTx uint64 + NetRx uint64 + Block uint64 + BlockIoPoll uint64 + Tasklet uint64 + Sched uint64 + Hrtimer uint64 + Rcu uint64 +} + +// Stat represents kernel/system statistics. +type Stat struct { + // Boot time in seconds since the Epoch. + BootTime uint64 + // Summed up cpu statistics. + CPUTotal CPUStat + // Per-CPU statistics. + CPU []CPUStat + // Number of times interrupts were handled, which contains numbered and unnumbered IRQs. + IRQTotal uint64 + // Number of times a numbered IRQ was triggered. + IRQ []uint64 + // Number of times a context switch happened. + ContextSwitches uint64 + // Number of times a process was created. + ProcessCreated uint64 + // Number of processes currently running. + ProcessesRunning uint64 + // Number of processes currently blocked (waiting for IO). + ProcessesBlocked uint64 + // Number of times a softirq was scheduled. + SoftIRQTotal uint64 + // Detailed softirq statistics. + SoftIRQ SoftIRQStat +} + +// NewStat returns kernel/system statistics read from /proc/stat. +func NewStat() (Stat, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Stat{}, err + } + + return fs.NewStat() +} + +// Parse a cpu statistics line and returns the CPUStat struct plus the cpu id (or -1 for the overall sum). +func parseCPUStat(line string) (CPUStat, int64, error) { + cpuStat := CPUStat{} + var cpu string + + count, err := fmt.Sscanf(line, "%s %f %f %f %f %f %f %f %f %f %f", + &cpu, + &cpuStat.User, &cpuStat.Nice, &cpuStat.System, &cpuStat.Idle, + &cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal, + &cpuStat.Guest, &cpuStat.GuestNice) + + if err != nil && err != io.EOF { + return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): %s", line, err) + } + if count == 0 { + return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): 0 elements parsed", line) + } + + cpuStat.User /= userHZ + cpuStat.Nice /= userHZ + cpuStat.System /= userHZ + cpuStat.Idle /= userHZ + cpuStat.Iowait /= userHZ + cpuStat.IRQ /= userHZ + cpuStat.SoftIRQ /= userHZ + cpuStat.Steal /= userHZ + cpuStat.Guest /= userHZ + cpuStat.GuestNice /= userHZ + + if cpu == "cpu" { + return cpuStat, -1, nil + } + + cpuID, err := strconv.ParseInt(cpu[3:], 10, 64) + if err != nil { + return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu/cpuid): %s", line, err) + } + + return cpuStat, cpuID, nil +} + +// Parse a softirq line. +func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) { + softIRQStat := SoftIRQStat{} + var total uint64 + var prefix string + + _, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d", + &prefix, &total, + &softIRQStat.Hi, &softIRQStat.Timer, &softIRQStat.NetTx, &softIRQStat.NetRx, + &softIRQStat.Block, &softIRQStat.BlockIoPoll, + &softIRQStat.Tasklet, &softIRQStat.Sched, + &softIRQStat.Hrtimer, &softIRQStat.Rcu) + + if err != nil { + return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %s (softirq): %s", line, err) + } + + return softIRQStat, total, nil +} + +// NewStat returns an information about current kernel/system statistics. 
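+//
+// A minimal usage sketch (error handling elided; assumes /proc is mounted at
+// DefaultMountPoint):
+//
+//	stat, _ := NewStat()
+//	fmt.Println(stat.BootTime, stat.ContextSwitches, stat.CPUTotal.User)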
+func (fs FS) NewStat() (Stat, error) {
+	// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+
+	f, err := os.Open(fs.Path("stat"))
+	if err != nil {
+		return Stat{}, err
+	}
+	defer f.Close()
+
+	stat := Stat{}
+
+	scanner := bufio.NewScanner(f)
+	for scanner.Scan() {
+		line := scanner.Text()
+		parts := strings.Fields(line)
+		// require at least <name> <value>
+		if len(parts) < 2 {
+			continue
+		}
+		switch {
+		case parts[0] == "btime":
+			if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("couldn't parse %s (btime): %s", parts[1], err)
+			}
+		case parts[0] == "intr":
+			if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("couldn't parse %s (intr): %s", parts[1], err)
+			}
+			numberedIRQs := parts[2:]
+			stat.IRQ = make([]uint64, len(numberedIRQs))
+			for i, count := range numberedIRQs {
+				if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+					return Stat{}, fmt.Errorf("couldn't parse %s (intr%d): %s", count, i, err)
+				}
+			}
+		case parts[0] == "ctxt":
+			if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("couldn't parse %s (ctxt): %s", parts[1], err)
+			}
+		case parts[0] == "processes":
+			if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("couldn't parse %s (processes): %s", parts[1], err)
+			}
+		case parts[0] == "procs_running":
+			if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("couldn't parse %s (procs_running): %s", parts[1], err)
+			}
+		case parts[0] == "procs_blocked":
+			if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("couldn't parse %s (procs_blocked): %s", parts[1], err)
+			}
+		case parts[0] == "softirq":
+			softIRQStats, total, err := parseSoftIRQStat(line)
+			if err != nil {
+				return Stat{}, err
+			}
+			stat.SoftIRQTotal = total
+			stat.SoftIRQ = softIRQStats
+		case strings.HasPrefix(parts[0], "cpu"):
+			cpuStat, cpuID, err := parseCPUStat(line)
+			if err != nil {
+				return Stat{}, err
+			}
+			if cpuID == -1 {
+				stat.CPUTotal = cpuStat
+			} else {
+				for int64(len(stat.CPU)) <= cpuID {
+					stat.CPU = append(stat.CPU, CPUStat{})
+				}
+				stat.CPU[cpuID] = cpuStat
+			}
+		}
+	}
+
+	if err := scanner.Err(); err != nil {
+		return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err)
+	}
+
+	return stat, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/xfrm.go b/vendor/github.com/prometheus/procfs/xfrm.go
new file mode 100644
index 00000000..ffe9df50
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/xfrm.go
@@ -0,0 +1,187 @@
+// Copyright 2017 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// XfrmStat models the contents of /proc/net/xfrm_stat.
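+// Each line of that file is a counter name followed by a single integer
+// value, e.g. "XfrmInError 0"; NewXfrmStat parses exactly that shape.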
+type XfrmStat struct {
+	// All errors which are not matched by others
+	XfrmInError int
+	// No buffer is left
+	XfrmInBufferError int
+	// Header error
+	XfrmInHdrError int
+	// No state found
+	// i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong
+	XfrmInNoStates int
+	// Transformation protocol specific error
+	// e.g. SA key is wrong
+	XfrmInStateProtoError int
+	// Transformation mode specific error
+	XfrmInStateModeError int
+	// Sequence error
+	// e.g. sequence number is out of window
+	XfrmInStateSeqError int
+	// State is expired
+	XfrmInStateExpired int
+	// State has mismatch option
+	// e.g. UDP encapsulation type is mismatched
+	XfrmInStateMismatch int
+	// State is invalid
+	XfrmInStateInvalid int
+	// No matching template for states
+	// e.g. inbound SAs are correct but SP rule is wrong
+	XfrmInTmplMismatch int
+	// No policy is found for states
+	// e.g. inbound SAs are correct but no SP is found
+	XfrmInNoPols int
+	// Policy discards
+	XfrmInPolBlock int
+	// Policy error
+	XfrmInPolError int
+	// All errors which are not matched by others
+	XfrmOutError int
+	// Bundle generation error
+	XfrmOutBundleGenError int
+	// Bundle check error
+	XfrmOutBundleCheckError int
+	// No state was found
+	XfrmOutNoStates int
+	// Transformation protocol specific error
+	XfrmOutStateProtoError int
+	// Transformation mode specific error
+	XfrmOutStateModeError int
+	// Sequence error
+	// i.e. sequence number overflow
+	XfrmOutStateSeqError int
+	// State is expired
+	XfrmOutStateExpired int
+	// Policy discards
+	XfrmOutPolBlock int
+	// Policy is dead
+	XfrmOutPolDead int
+	// Policy error
+	XfrmOutPolError int
+	XfrmFwdHdrError int
+	XfrmOutStateInvalid int
+	XfrmAcquireError int
+}
+
+// NewXfrmStat reads the xfrm_stat statistics.
+func NewXfrmStat() (XfrmStat, error) {
+	fs, err := NewFS(DefaultMountPoint)
+	if err != nil {
+		return XfrmStat{}, err
+	}
+
+	return fs.NewXfrmStat()
+}
+
+// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem.
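+//
+// A minimal usage sketch (error handling elided):
+//
+//	fs, _ := NewFS(DefaultMountPoint)
+//	x, _ := fs.NewXfrmStat()
+//	fmt.Println(x.XfrmInError)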
+func (fs FS) NewXfrmStat() (XfrmStat, error) {
+	file, err := os.Open(fs.Path("net/xfrm_stat"))
+	if err != nil {
+		return XfrmStat{}, err
+	}
+	defer file.Close()
+
+	var (
+		x = XfrmStat{}
+		s = bufio.NewScanner(file)
+	)
+
+	for s.Scan() {
+		fields := strings.Fields(s.Text())
+
+		if len(fields) != 2 {
+			return XfrmStat{}, fmt.Errorf(
+				"couldn't parse %s line %s", file.Name(), s.Text())
+		}
+
+		name := fields[0]
+		value, err := strconv.Atoi(fields[1])
+		if err != nil {
+			return XfrmStat{}, err
+		}
+
+		switch name {
+		case "XfrmInError":
+			x.XfrmInError = value
+		case "XfrmInBufferError":
+			x.XfrmInBufferError = value
+		case "XfrmInHdrError":
+			x.XfrmInHdrError = value
+		case "XfrmInNoStates":
+			x.XfrmInNoStates = value
+		case "XfrmInStateProtoError":
+			x.XfrmInStateProtoError = value
+		case "XfrmInStateModeError":
+			x.XfrmInStateModeError = value
+		case "XfrmInStateSeqError":
+			x.XfrmInStateSeqError = value
+		case "XfrmInStateExpired":
+			x.XfrmInStateExpired = value
+		case "XfrmInStateInvalid":
+			x.XfrmInStateInvalid = value
+		case "XfrmInTmplMismatch":
+			x.XfrmInTmplMismatch = value
+		case "XfrmInNoPols":
+			x.XfrmInNoPols = value
+		case "XfrmInPolBlock":
+			x.XfrmInPolBlock = value
+		case "XfrmInPolError":
+			x.XfrmInPolError = value
+		case "XfrmOutError":
+			x.XfrmOutError = value
+		case "XfrmInStateMismatch":
+			x.XfrmInStateMismatch = value
+		case "XfrmOutBundleGenError":
+			x.XfrmOutBundleGenError = value
+		case "XfrmOutBundleCheckError":
+			x.XfrmOutBundleCheckError = value
+		case "XfrmOutNoStates":
+			x.XfrmOutNoStates = value
+		case "XfrmOutStateProtoError":
+			x.XfrmOutStateProtoError = value
+		case "XfrmOutStateModeError":
+			x.XfrmOutStateModeError = value
+		case "XfrmOutStateSeqError":
+			x.XfrmOutStateSeqError = value
+		case "XfrmOutStateExpired":
+			x.XfrmOutStateExpired = value
+		case "XfrmOutPolBlock":
+			x.XfrmOutPolBlock = value
+		case "XfrmOutPolDead":
+			x.XfrmOutPolDead = value
+		case "XfrmOutPolError":
+			x.XfrmOutPolError = value
+		case "XfrmFwdHdrError":
+			x.XfrmFwdHdrError = value
+		case "XfrmOutStateInvalid":
+			x.XfrmOutStateInvalid = value
+		case "XfrmAcquireError":
+			x.XfrmAcquireError = value
+		}
+	}
+
+	return x, s.Err()
+}
diff --git a/vendor/github.com/prometheus/procfs/xfs/parse.go b/vendor/github.com/prometheus/procfs/xfs/parse.go
new file mode 100644
index 00000000..2bc0ef34
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/xfs/parse.go
@@ -0,0 +1,330 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package xfs
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// ParseStats parses a Stats from an input io.Reader, using the format
+// found in /proc/fs/xfs/stat.
+func ParseStats(r io.Reader) (*Stats, error) {
+	const (
+		// Fields parsed into stats structures.
+ fieldExtentAlloc = "extent_alloc" + fieldAbt = "abt" + fieldBlkMap = "blk_map" + fieldBmbt = "bmbt" + fieldDir = "dir" + fieldTrans = "trans" + fieldIg = "ig" + fieldLog = "log" + fieldRw = "rw" + fieldAttr = "attr" + fieldIcluster = "icluster" + fieldVnodes = "vnodes" + fieldBuf = "buf" + fieldXpc = "xpc" + + // Unimplemented at this time due to lack of documentation. + fieldPushAil = "push_ail" + fieldXstrat = "xstrat" + fieldAbtb2 = "abtb2" + fieldAbtc2 = "abtc2" + fieldBmbt2 = "bmbt2" + fieldIbt2 = "ibt2" + fieldFibt2 = "fibt2" + fieldQm = "qm" + fieldDebug = "debug" + ) + + var xfss Stats + + s := bufio.NewScanner(r) + for s.Scan() { + // Expect at least a string label and a single integer value, ex: + // - abt 0 + // - rw 1 2 + ss := strings.Fields(string(s.Bytes())) + if len(ss) < 2 { + continue + } + label := ss[0] + + // Extended precision counters are uint64 values. + if label == fieldXpc { + us, err := util.ParseUint64s(ss[1:]) + if err != nil { + return nil, err + } + + xfss.ExtendedPrecision, err = extendedPrecisionStats(us) + if err != nil { + return nil, err + } + + continue + } + + // All other counters are uint32 values. + us, err := util.ParseUint32s(ss[1:]) + if err != nil { + return nil, err + } + + switch label { + case fieldExtentAlloc: + xfss.ExtentAllocation, err = extentAllocationStats(us) + case fieldAbt: + xfss.AllocationBTree, err = btreeStats(us) + case fieldBlkMap: + xfss.BlockMapping, err = blockMappingStats(us) + case fieldBmbt: + xfss.BlockMapBTree, err = btreeStats(us) + case fieldDir: + xfss.DirectoryOperation, err = directoryOperationStats(us) + case fieldTrans: + xfss.Transaction, err = transactionStats(us) + case fieldIg: + xfss.InodeOperation, err = inodeOperationStats(us) + case fieldLog: + xfss.LogOperation, err = logOperationStats(us) + case fieldRw: + xfss.ReadWrite, err = readWriteStats(us) + case fieldAttr: + xfss.AttributeOperation, err = attributeOperationStats(us) + case fieldIcluster: + xfss.InodeClustering, err = inodeClusteringStats(us) + case fieldVnodes: + xfss.Vnode, err = vnodeStats(us) + case fieldBuf: + xfss.Buffer, err = bufferStats(us) + } + if err != nil { + return nil, err + } + } + + return &xfss, s.Err() +} + +// extentAllocationStats builds an ExtentAllocationStats from a slice of uint32s. +func extentAllocationStats(us []uint32) (ExtentAllocationStats, error) { + if l := len(us); l != 4 { + return ExtentAllocationStats{}, fmt.Errorf("incorrect number of values for XFS extent allocation stats: %d", l) + } + + return ExtentAllocationStats{ + ExtentsAllocated: us[0], + BlocksAllocated: us[1], + ExtentsFreed: us[2], + BlocksFreed: us[3], + }, nil +} + +// btreeStats builds a BTreeStats from a slice of uint32s. +func btreeStats(us []uint32) (BTreeStats, error) { + if l := len(us); l != 4 { + return BTreeStats{}, fmt.Errorf("incorrect number of values for XFS btree stats: %d", l) + } + + return BTreeStats{ + Lookups: us[0], + Compares: us[1], + RecordsInserted: us[2], + RecordsDeleted: us[3], + }, nil +} + +// BlockMappingStat builds a BlockMappingStats from a slice of uint32s. 
+func blockMappingStats(us []uint32) (BlockMappingStats, error) { + if l := len(us); l != 7 { + return BlockMappingStats{}, fmt.Errorf("incorrect number of values for XFS block mapping stats: %d", l) + } + + return BlockMappingStats{ + Reads: us[0], + Writes: us[1], + Unmaps: us[2], + ExtentListInsertions: us[3], + ExtentListDeletions: us[4], + ExtentListLookups: us[5], + ExtentListCompares: us[6], + }, nil +} + +// DirectoryOperationStats builds a DirectoryOperationStats from a slice of uint32s. +func directoryOperationStats(us []uint32) (DirectoryOperationStats, error) { + if l := len(us); l != 4 { + return DirectoryOperationStats{}, fmt.Errorf("incorrect number of values for XFS directory operation stats: %d", l) + } + + return DirectoryOperationStats{ + Lookups: us[0], + Creates: us[1], + Removes: us[2], + Getdents: us[3], + }, nil +} + +// TransactionStats builds a TransactionStats from a slice of uint32s. +func transactionStats(us []uint32) (TransactionStats, error) { + if l := len(us); l != 3 { + return TransactionStats{}, fmt.Errorf("incorrect number of values for XFS transaction stats: %d", l) + } + + return TransactionStats{ + Sync: us[0], + Async: us[1], + Empty: us[2], + }, nil +} + +// InodeOperationStats builds an InodeOperationStats from a slice of uint32s. +func inodeOperationStats(us []uint32) (InodeOperationStats, error) { + if l := len(us); l != 7 { + return InodeOperationStats{}, fmt.Errorf("incorrect number of values for XFS inode operation stats: %d", l) + } + + return InodeOperationStats{ + Attempts: us[0], + Found: us[1], + Recycle: us[2], + Missed: us[3], + Duplicate: us[4], + Reclaims: us[5], + AttributeChange: us[6], + }, nil +} + +// LogOperationStats builds a LogOperationStats from a slice of uint32s. +func logOperationStats(us []uint32) (LogOperationStats, error) { + if l := len(us); l != 5 { + return LogOperationStats{}, fmt.Errorf("incorrect number of values for XFS log operation stats: %d", l) + } + + return LogOperationStats{ + Writes: us[0], + Blocks: us[1], + NoInternalBuffers: us[2], + Force: us[3], + ForceSleep: us[4], + }, nil +} + +// ReadWriteStats builds a ReadWriteStats from a slice of uint32s. +func readWriteStats(us []uint32) (ReadWriteStats, error) { + if l := len(us); l != 2 { + return ReadWriteStats{}, fmt.Errorf("incorrect number of values for XFS read write stats: %d", l) + } + + return ReadWriteStats{ + Read: us[0], + Write: us[1], + }, nil +} + +// AttributeOperationStats builds an AttributeOperationStats from a slice of uint32s. +func attributeOperationStats(us []uint32) (AttributeOperationStats, error) { + if l := len(us); l != 4 { + return AttributeOperationStats{}, fmt.Errorf("incorrect number of values for XFS attribute operation stats: %d", l) + } + + return AttributeOperationStats{ + Get: us[0], + Set: us[1], + Remove: us[2], + List: us[3], + }, nil +} + +// InodeClusteringStats builds an InodeClusteringStats from a slice of uint32s. +func inodeClusteringStats(us []uint32) (InodeClusteringStats, error) { + if l := len(us); l != 3 { + return InodeClusteringStats{}, fmt.Errorf("incorrect number of values for XFS inode clustering stats: %d", l) + } + + return InodeClusteringStats{ + Iflush: us[0], + Flush: us[1], + FlushInode: us[2], + }, nil +} + +// VnodeStats builds a VnodeStats from a slice of uint32s. +func vnodeStats(us []uint32) (VnodeStats, error) { + // The attribute "Free" appears to not be available on older XFS + // stats versions. Therefore, 7 or 8 elements may appear in + // this slice. 
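+	// (That is, a "vnodes" line carries 7 values on older kernels and 8 on
+	// newer ones.)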
+ l := len(us) + if l != 7 && l != 8 { + return VnodeStats{}, fmt.Errorf("incorrect number of values for XFS vnode stats: %d", l) + } + + s := VnodeStats{ + Active: us[0], + Allocate: us[1], + Get: us[2], + Hold: us[3], + Release: us[4], + Reclaim: us[5], + Remove: us[6], + } + + // Skip adding free, unless it is present. The zero value will + // be used in place of an actual count. + if l == 7 { + return s, nil + } + + s.Free = us[7] + return s, nil +} + +// BufferStats builds a BufferStats from a slice of uint32s. +func bufferStats(us []uint32) (BufferStats, error) { + if l := len(us); l != 9 { + return BufferStats{}, fmt.Errorf("incorrect number of values for XFS buffer stats: %d", l) + } + + return BufferStats{ + Get: us[0], + Create: us[1], + GetLocked: us[2], + GetLockedWaited: us[3], + BusyLocked: us[4], + MissLocked: us[5], + PageRetries: us[6], + PageFound: us[7], + GetRead: us[8], + }, nil +} + +// ExtendedPrecisionStats builds an ExtendedPrecisionStats from a slice of uint32s. +func extendedPrecisionStats(us []uint64) (ExtendedPrecisionStats, error) { + if l := len(us); l != 3 { + return ExtendedPrecisionStats{}, fmt.Errorf("incorrect number of values for XFS extended precision stats: %d", l) + } + + return ExtendedPrecisionStats{ + FlushBytes: us[0], + WriteBytes: us[1], + ReadBytes: us[2], + }, nil +} diff --git a/vendor/github.com/prometheus/procfs/xfs/xfs.go b/vendor/github.com/prometheus/procfs/xfs/xfs.go new file mode 100644 index 00000000..d86794b7 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/xfs/xfs.go @@ -0,0 +1,163 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package xfs provides access to statistics exposed by the XFS filesystem. +package xfs + +// Stats contains XFS filesystem runtime statistics, parsed from +// /proc/fs/xfs/stat. +// +// The names and meanings of each statistic were taken from +// http://xfs.org/index.php/Runtime_Stats and xfs_stats.h in the Linux +// kernel source. Most counters are uint32s (same data types used in +// xfs_stats.h), but some of the "extended precision stats" are uint64s. +type Stats struct { + // The name of the filesystem used to source these statistics. + // If empty, this indicates aggregated statistics for all XFS + // filesystems on the host. + Name string + + ExtentAllocation ExtentAllocationStats + AllocationBTree BTreeStats + BlockMapping BlockMappingStats + BlockMapBTree BTreeStats + DirectoryOperation DirectoryOperationStats + Transaction TransactionStats + InodeOperation InodeOperationStats + LogOperation LogOperationStats + ReadWrite ReadWriteStats + AttributeOperation AttributeOperationStats + InodeClustering InodeClusteringStats + Vnode VnodeStats + Buffer BufferStats + ExtendedPrecision ExtendedPrecisionStats +} + +// ExtentAllocationStats contains statistics regarding XFS extent allocations. 
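+// The four counters correspond, in order, to the values of the
+// "extent_alloc" line in /proc/fs/xfs/stat (see extentAllocationStats in
+// parse.go).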
+type ExtentAllocationStats struct { + ExtentsAllocated uint32 + BlocksAllocated uint32 + ExtentsFreed uint32 + BlocksFreed uint32 +} + +// BTreeStats contains statistics regarding an XFS internal B-tree. +type BTreeStats struct { + Lookups uint32 + Compares uint32 + RecordsInserted uint32 + RecordsDeleted uint32 +} + +// BlockMappingStats contains statistics regarding XFS block maps. +type BlockMappingStats struct { + Reads uint32 + Writes uint32 + Unmaps uint32 + ExtentListInsertions uint32 + ExtentListDeletions uint32 + ExtentListLookups uint32 + ExtentListCompares uint32 +} + +// DirectoryOperationStats contains statistics regarding XFS directory entries. +type DirectoryOperationStats struct { + Lookups uint32 + Creates uint32 + Removes uint32 + Getdents uint32 +} + +// TransactionStats contains statistics regarding XFS metadata transactions. +type TransactionStats struct { + Sync uint32 + Async uint32 + Empty uint32 +} + +// InodeOperationStats contains statistics regarding XFS inode operations. +type InodeOperationStats struct { + Attempts uint32 + Found uint32 + Recycle uint32 + Missed uint32 + Duplicate uint32 + Reclaims uint32 + AttributeChange uint32 +} + +// LogOperationStats contains statistics regarding the XFS log buffer. +type LogOperationStats struct { + Writes uint32 + Blocks uint32 + NoInternalBuffers uint32 + Force uint32 + ForceSleep uint32 +} + +// ReadWriteStats contains statistics regarding the number of read and write +// system calls for XFS filesystems. +type ReadWriteStats struct { + Read uint32 + Write uint32 +} + +// AttributeOperationStats contains statistics regarding manipulation of +// XFS extended file attributes. +type AttributeOperationStats struct { + Get uint32 + Set uint32 + Remove uint32 + List uint32 +} + +// InodeClusteringStats contains statistics regarding XFS inode clustering +// operations. +type InodeClusteringStats struct { + Iflush uint32 + Flush uint32 + FlushInode uint32 +} + +// VnodeStats contains statistics regarding XFS vnode operations. +type VnodeStats struct { + Active uint32 + Allocate uint32 + Get uint32 + Hold uint32 + Release uint32 + Reclaim uint32 + Remove uint32 + Free uint32 +} + +// BufferStats contains statistics regarding XFS read/write I/O buffers. +type BufferStats struct { + Get uint32 + Create uint32 + GetLocked uint32 + GetLockedWaited uint32 + BusyLocked uint32 + MissLocked uint32 + PageRetries uint32 + PageFound uint32 + GetRead uint32 +} + +// ExtendedPrecisionStats contains high precision counters used to track the +// total number of bytes read, written, or flushed, during XFS operations. +type ExtendedPrecisionStats struct { + FlushBytes uint64 + WriteBytes uint64 + ReadBytes uint64 +} diff --git a/vendor/golang.org/x/net/idna/idna.go b/vendor/golang.org/x/net/idna/idna.go deleted file mode 100644 index 3daa8979..00000000 --- a/vendor/golang.org/x/net/idna/idna.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package idna implements IDNA2008 (Internationalized Domain Names for -// Applications), defined in RFC 5890, RFC 5891, RFC 5892, RFC 5893 and -// RFC 5894. -package idna // import "golang.org/x/net/idna" - -import ( - "strings" - "unicode/utf8" -) - -// TODO(nigeltao): specify when errors occur. For example, is ToASCII(".") or -// ToASCII("foo\x00") an error? 
See also http://www.unicode.org/faq/idn.html#11 - -// acePrefix is the ASCII Compatible Encoding prefix. -const acePrefix = "xn--" - -// ToASCII converts a domain or domain label to its ASCII form. For example, -// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and -// ToASCII("golang") is "golang". -func ToASCII(s string) (string, error) { - if ascii(s) { - return s, nil - } - labels := strings.Split(s, ".") - for i, label := range labels { - if !ascii(label) { - a, err := encode(acePrefix, label) - if err != nil { - return "", err - } - labels[i] = a - } - } - return strings.Join(labels, "."), nil -} - -// ToUnicode converts a domain or domain label to its Unicode form. For example, -// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and -// ToUnicode("golang") is "golang". -func ToUnicode(s string) (string, error) { - if !strings.Contains(s, acePrefix) { - return s, nil - } - labels := strings.Split(s, ".") - for i, label := range labels { - if strings.HasPrefix(label, acePrefix) { - u, err := decode(label[len(acePrefix):]) - if err != nil { - return "", err - } - labels[i] = u - } - } - return strings.Join(labels, "."), nil -} - -func ascii(s string) bool { - for i := 0; i < len(s); i++ { - if s[i] >= utf8.RuneSelf { - return false - } - } - return true -} diff --git a/vendor/golang.org/x/net/idna/punycode.go b/vendor/golang.org/x/net/idna/punycode.go deleted file mode 100644 index 92e733f6..00000000 --- a/vendor/golang.org/x/net/idna/punycode.go +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package idna - -// This file implements the Punycode algorithm from RFC 3492. - -import ( - "fmt" - "math" - "strings" - "unicode/utf8" -) - -// These parameter values are specified in section 5. -// -// All computation is done with int32s, so that overflow behavior is identical -// regardless of whether int is 32-bit or 64-bit. -const ( - base int32 = 36 - damp int32 = 700 - initialBias int32 = 72 - initialN int32 = 128 - skew int32 = 38 - tmax int32 = 26 - tmin int32 = 1 -) - -// decode decodes a string as specified in section 6.2. 
-func decode(encoded string) (string, error) { - if encoded == "" { - return "", nil - } - pos := 1 + strings.LastIndex(encoded, "-") - if pos == 1 { - return "", fmt.Errorf("idna: invalid label %q", encoded) - } - if pos == len(encoded) { - return encoded[:len(encoded)-1], nil - } - output := make([]rune, 0, len(encoded)) - if pos != 0 { - for _, r := range encoded[:pos-1] { - output = append(output, r) - } - } - i, n, bias := int32(0), initialN, initialBias - for pos < len(encoded) { - oldI, w := i, int32(1) - for k := base; ; k += base { - if pos == len(encoded) { - return "", fmt.Errorf("idna: invalid label %q", encoded) - } - digit, ok := decodeDigit(encoded[pos]) - if !ok { - return "", fmt.Errorf("idna: invalid label %q", encoded) - } - pos++ - i += digit * w - if i < 0 { - return "", fmt.Errorf("idna: invalid label %q", encoded) - } - t := k - bias - if t < tmin { - t = tmin - } else if t > tmax { - t = tmax - } - if digit < t { - break - } - w *= base - t - if w >= math.MaxInt32/base { - return "", fmt.Errorf("idna: invalid label %q", encoded) - } - } - x := int32(len(output) + 1) - bias = adapt(i-oldI, x, oldI == 0) - n += i / x - i %= x - if n > utf8.MaxRune || len(output) >= 1024 { - return "", fmt.Errorf("idna: invalid label %q", encoded) - } - output = append(output, 0) - copy(output[i+1:], output[i:]) - output[i] = n - i++ - } - return string(output), nil -} - -// encode encodes a string as specified in section 6.3 and prepends prefix to -// the result. -// -// The "while h < length(input)" line in the specification becomes "for -// remaining != 0" in the Go code, because len(s) in Go is in bytes, not runes. -func encode(prefix, s string) (string, error) { - output := make([]byte, len(prefix), len(prefix)+1+2*len(s)) - copy(output, prefix) - delta, n, bias := int32(0), initialN, initialBias - b, remaining := int32(0), int32(0) - for _, r := range s { - if r < 0x80 { - b++ - output = append(output, byte(r)) - } else { - remaining++ - } - } - h := b - if b > 0 { - output = append(output, '-') - } - for remaining != 0 { - m := int32(0x7fffffff) - for _, r := range s { - if m > r && r >= n { - m = r - } - } - delta += (m - n) * (h + 1) - if delta < 0 { - return "", fmt.Errorf("idna: invalid label %q", s) - } - n = m - for _, r := range s { - if r < n { - delta++ - if delta < 0 { - return "", fmt.Errorf("idna: invalid label %q", s) - } - continue - } - if r > n { - continue - } - q := delta - for k := base; ; k += base { - t := k - bias - if t < tmin { - t = tmin - } else if t > tmax { - t = tmax - } - if q < t { - break - } - output = append(output, encodeDigit(t+(q-t)%(base-t))) - q = (q - t) / (base - t) - } - output = append(output, encodeDigit(q)) - bias = adapt(delta, h+1, h == b) - delta = 0 - h++ - remaining-- - } - delta++ - n++ - } - return string(output), nil -} - -func decodeDigit(x byte) (digit int32, ok bool) { - switch { - case '0' <= x && x <= '9': - return int32(x - ('0' - 26)), true - case 'A' <= x && x <= 'Z': - return int32(x - 'A'), true - case 'a' <= x && x <= 'z': - return int32(x - 'a'), true - } - return 0, false -} - -func encodeDigit(digit int32) byte { - switch { - case 0 <= digit && digit < 26: - return byte(digit + 'a') - case 26 <= digit && digit < 36: - return byte(digit + ('0' - 26)) - } - panic("idna: internal error in punycode encoding") -} - -// adapt is the bias adaptation function specified in section 6.1. 
-func adapt(delta, numPoints int32, firstTime bool) int32 { - if firstTime { - delta /= damp - } else { - delta /= 2 - } - delta += delta / numPoints - k := int32(0) - for delta > ((base-tmin)*tmax)/2 { - delta /= base - tmin - k += base - } - return k + (base-tmin+1)*delta/(delta+skew) -} diff --git a/vendor/golang.org/x/net/publicsuffix/gen.go b/vendor/golang.org/x/net/publicsuffix/gen.go deleted file mode 100644 index 5c8d7b5f..00000000 --- a/vendor/golang.org/x/net/publicsuffix/gen.go +++ /dev/null @@ -1,663 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// This program generates table.go and table_test.go. -// Invoke as: -// -// go run gen.go -version "xxx" >table.go -// go run gen.go -version "xxx" -test >table_test.go -// -// Pass -v to print verbose progress information. -// -// The version is derived from information found at -// https://github.com/publicsuffix/list/commits/master/public_suffix_list.dat -// -// To fetch a particular git revision, such as 5c70ccd250, pass -// -url "https://raw.githubusercontent.com/publicsuffix/list/5c70ccd250/public_suffix_list.dat" - -import ( - "bufio" - "bytes" - "flag" - "fmt" - "go/format" - "io" - "net/http" - "os" - "regexp" - "sort" - "strings" - - "golang.org/x/net/idna" -) - -const ( - // These sum of these four values must be no greater than 32. - nodesBitsChildren = 9 - nodesBitsICANN = 1 - nodesBitsTextOffset = 15 - nodesBitsTextLength = 6 - - // These sum of these four values must be no greater than 32. - childrenBitsWildcard = 1 - childrenBitsNodeType = 2 - childrenBitsHi = 14 - childrenBitsLo = 14 -) - -var ( - maxChildren int - maxTextOffset int - maxTextLength int - maxHi uint32 - maxLo uint32 -) - -func max(a, b int) int { - if a < b { - return b - } - return a -} - -func u32max(a, b uint32) uint32 { - if a < b { - return b - } - return a -} - -const ( - nodeTypeNormal = 0 - nodeTypeException = 1 - nodeTypeParentOnly = 2 - numNodeType = 3 -) - -func nodeTypeStr(n int) string { - switch n { - case nodeTypeNormal: - return "+" - case nodeTypeException: - return "!" - case nodeTypeParentOnly: - return "o" - } - panic("unreachable") -} - -var ( - labelEncoding = map[string]uint32{} - labelsList = []string{} - labelsMap = map[string]bool{} - rules = []string{} - - // validSuffix is used to check that the entries in the public suffix list - // are in canonical form (after Punycode encoding). Specifically, capital - // letters are not allowed. - validSuffix = regexp.MustCompile(`^[a-z0-9_\!\*\-\.]+$`) - - subset = flag.Bool("subset", false, "generate only a subset of the full table, for debugging") - url = flag.String("url", - "https://publicsuffix.org/list/effective_tld_names.dat", - "URL of the publicsuffix.org list. 
If empty, stdin is read instead") - v = flag.Bool("v", false, "verbose output (to stderr)") - version = flag.String("version", "", "the effective_tld_names.dat version") - test = flag.Bool("test", false, "generate table_test.go") -) - -func main() { - if err := main1(); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } -} - -func main1() error { - flag.Parse() - if nodesBitsTextLength+nodesBitsTextOffset+nodesBitsICANN+nodesBitsChildren > 32 { - return fmt.Errorf("not enough bits to encode the nodes table") - } - if childrenBitsLo+childrenBitsHi+childrenBitsNodeType+childrenBitsWildcard > 32 { - return fmt.Errorf("not enough bits to encode the children table") - } - if *version == "" { - return fmt.Errorf("-version was not specified") - } - var r io.Reader = os.Stdin - if *url != "" { - res, err := http.Get(*url) - if err != nil { - return err - } - if res.StatusCode != http.StatusOK { - return fmt.Errorf("bad GET status for %s: %d", *url, res.Status) - } - r = res.Body - defer res.Body.Close() - } - - var root node - icann := false - buf := new(bytes.Buffer) - br := bufio.NewReader(r) - for { - s, err := br.ReadString('\n') - if err != nil { - if err == io.EOF { - break - } - return err - } - s = strings.TrimSpace(s) - if strings.Contains(s, "BEGIN ICANN DOMAINS") { - icann = true - continue - } - if strings.Contains(s, "END ICANN DOMAINS") { - icann = false - continue - } - if s == "" || strings.HasPrefix(s, "//") { - continue - } - s, err = idna.ToASCII(s) - if err != nil { - return err - } - if !validSuffix.MatchString(s) { - return fmt.Errorf("bad publicsuffix.org list data: %q", s) - } - - if *subset { - switch { - case s == "ac.jp" || strings.HasSuffix(s, ".ac.jp"): - case s == "ak.us" || strings.HasSuffix(s, ".ak.us"): - case s == "ao" || strings.HasSuffix(s, ".ao"): - case s == "ar" || strings.HasSuffix(s, ".ar"): - case s == "arpa" || strings.HasSuffix(s, ".arpa"): - case s == "cy" || strings.HasSuffix(s, ".cy"): - case s == "dyndns.org" || strings.HasSuffix(s, ".dyndns.org"): - case s == "jp": - case s == "kobe.jp" || strings.HasSuffix(s, ".kobe.jp"): - case s == "kyoto.jp" || strings.HasSuffix(s, ".kyoto.jp"): - case s == "om" || strings.HasSuffix(s, ".om"): - case s == "uk" || strings.HasSuffix(s, ".uk"): - case s == "uk.com" || strings.HasSuffix(s, ".uk.com"): - case s == "tw" || strings.HasSuffix(s, ".tw"): - case s == "zw" || strings.HasSuffix(s, ".zw"): - case s == "xn--p1ai" || strings.HasSuffix(s, ".xn--p1ai"): - // xn--p1ai is Russian-Cyrillic "рф". 
-		default:
-			continue
-		}
-		}
-
-		rules = append(rules, s)
-
-		nt, wildcard := nodeTypeNormal, false
-		switch {
-		case strings.HasPrefix(s, "*."):
-			s, nt = s[2:], nodeTypeParentOnly
-			wildcard = true
-		case strings.HasPrefix(s, "!"):
-			s, nt = s[1:], nodeTypeException
-		}
-		labels := strings.Split(s, ".")
-		for n, i := &root, len(labels)-1; i >= 0; i-- {
-			label := labels[i]
-			n = n.child(label)
-			if i == 0 {
-				if nt != nodeTypeParentOnly && n.nodeType == nodeTypeParentOnly {
-					n.nodeType = nt
-				}
-				n.icann = n.icann && icann
-				n.wildcard = n.wildcard || wildcard
-			}
-			labelsMap[label] = true
-		}
-	}
-	labelsList = make([]string, 0, len(labelsMap))
-	for label := range labelsMap {
-		labelsList = append(labelsList, label)
-	}
-	sort.Strings(labelsList)
-
-	p := printReal
-	if *test {
-		p = printTest
-	}
-	if err := p(buf, &root); err != nil {
-		return err
-	}
-
-	b, err := format.Source(buf.Bytes())
-	if err != nil {
-		return err
-	}
-	_, err = os.Stdout.Write(b)
-	return err
-}
-
-func printTest(w io.Writer, n *node) error {
-	fmt.Fprintf(w, "// generated by go run gen.go; DO NOT EDIT\n\n")
-	fmt.Fprintf(w, "package publicsuffix\n\nvar rules = [...]string{\n")
-	for _, rule := range rules {
-		fmt.Fprintf(w, "%q,\n", rule)
-	}
-	fmt.Fprintf(w, "}\n\nvar nodeLabels = [...]string{\n")
-	if err := n.walk(w, printNodeLabel); err != nil {
-		return err
-	}
-	fmt.Fprintf(w, "}\n")
-	return nil
-}
-
-func printReal(w io.Writer, n *node) error {
-	const header = `// generated by go run gen.go; DO NOT EDIT
-
-package publicsuffix
-
-const version = %q
-
-const (
-	nodesBitsChildren = %d
-	nodesBitsICANN = %d
-	nodesBitsTextOffset = %d
-	nodesBitsTextLength = %d
-
-	childrenBitsWildcard = %d
-	childrenBitsNodeType = %d
-	childrenBitsHi = %d
-	childrenBitsLo = %d
-)
-
-const (
-	nodeTypeNormal = %d
-	nodeTypeException = %d
-	nodeTypeParentOnly = %d
-)
-
-// numTLD is the number of top level domains.
-const numTLD = %d
-
-`
-	fmt.Fprintf(w, header, *version,
-		nodesBitsChildren, nodesBitsICANN, nodesBitsTextOffset, nodesBitsTextLength,
-		childrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo,
-		nodeTypeNormal, nodeTypeException, nodeTypeParentOnly, len(n.children))
-
-	text := combineText(labelsList)
-	if text == "" {
-		return fmt.Errorf("internal error: makeText returned no text")
-	}
-	for _, label := range labelsList {
-		offset, length := strings.Index(text, label), len(label)
-		if offset < 0 {
-			return fmt.Errorf("internal error: could not find %q in text %q", label, text)
-		}
-		maxTextOffset, maxTextLength = max(maxTextOffset, offset), max(maxTextLength, length)
-		if offset >= 1<<nodesBitsTextOffset {
-			return fmt.Errorf("text offset %d is too large, or nodesBitsTextOffset is too small", offset)
-		}
-		if length >= 1<<nodesBitsTextLength {
-			return fmt.Errorf("text length %d is too large, or nodesBitsTextLength is too small", length)
-		}
-		labelEncoding[label] = uint32(offset)<<nodesBitsTextLength | uint32(length)
-	}
-	fmt.Fprintf(w, "// Text is the combined text of all labels.\nconst text = ")
-	for len(text) > 0 {
-		n, plus := len(text), ""
-		if n > 64 {
-			n, plus = 64, " +"
-		}
-		fmt.Fprintf(w, "%q%s\n", text[:n], plus)
-		text = text[n:]
-	}
-
-	if err := n.walk(w, assignIndexes); err != nil {
-		return err
-	}
-
-	fmt.Fprintf(w, `
-
-// nodes is the list of nodes. Each node is represented as a uint32, which
-// encodes the node's children, wildcard bit and node type (as an index into
-// the children array), ICANN bit and text.
-//
-// In the //-comment after each node's data, the nodes indexes of the children
-// are formatted as (n0x1234-n0x1256), with * denoting the wildcard bit. The
-// nodeType is printed as + for normal, ! for exception, and o for parent-only
-// nodes that have children but don't match a domain label in their own right.
-// An I denotes an ICANN domain.
-//
-// The layout within the uint32, from MSB to LSB, is:
-//	[%2d bits] unused
-//	[%2d bits] children index
-//	[%2d bits] ICANN bit
-//	[%2d bits] text index
-//	[%2d bits] text length
-var nodes = [...]uint32{
-`,
-		32-nodesBitsChildren-nodesBitsICANN-nodesBitsTextOffset-nodesBitsTextLength,
-		nodesBitsChildren, nodesBitsICANN, nodesBitsTextOffset, nodesBitsTextLength)
-	if err := n.walk(w, printNode); err != nil {
-		return err
-	}
-	fmt.Fprintf(w, `}
-
-// children is the list of nodes' children, the parent's wildcard bit and the
-// parent's node type. If a node has no children then their children index
-// will be in the range [0, 6), depending on the wildcard bit and node type.
-//
-// The layout within the uint32, from MSB to LSB, is:
-//	[%2d bits] unused
-//	[%2d bits] wildcard bit
-//	[%2d bits] node type
-//	[%2d bits] high nodes index (exclusive) of children
-//	[%2d bits] low nodes index (inclusive) of children
-var children=[...]uint32{
-`,
-		32-childrenBitsWildcard-childrenBitsNodeType-childrenBitsHi-childrenBitsLo,
-		childrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo)
-	for i, c := range childrenEncoding {
-		s := "---------------"
-		lo := c & (1<<childrenBitsLo - 1)
-		hi := (c >> childrenBitsLo) & (1<<childrenBitsHi - 1)
-		if lo != hi {
-			s = fmt.Sprintf("n0x%04x-n0x%04x", lo, hi)
-		}
-		nodeType := int(c>>(childrenBitsLo+childrenBitsHi)) & (1<<childrenBitsNodeType - 1)
-		wildcard := c>>(childrenBitsLo+childrenBitsHi+childrenBitsNodeType) != 0
-		fmt.Fprintf(w, "0x%08x, // c0x%04x (%s)%s %s\n",
-			c, i, s, wildcardStr(wildcard), nodeTypeStr(nodeType))
-	}
-	fmt.Fprintf(w, "}\n\n")
-	fmt.Fprintf(w, "// max children %d (capacity %d)\n", maxChildren, 1<<nodesBitsChildren-1)
-	fmt.Fprintf(w, "// max text offset %d (capacity %d)\n", maxTextOffset, 1<<nodesBitsTextOffset-1)
-	fmt.Fprintf(w, "// max text length %d (capacity %d)\n", maxTextLength, 1<<nodesBitsTextLength-1)
-	fmt.Fprintf(w, "// max hi %d (capacity %d)\n", maxHi, 1<<childrenBitsHi-1)
-	fmt.Fprintf(w, "// max lo %d (capacity %d)\n", maxLo, 1<<childrenBitsLo-1)
-
-	return nil
-}
-
-type byLength []string
-
-func (s byLength) Len() int           { return len(s) }
-func (s byLength) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
-func (s byLength) Less(i, j int) bool { return len(s[i]) < len(s[j]) }
-
-// removeSubstrings returns a copy of its input with any strings removed
-// that are substrings of other provided strings.
-func removeSubstrings(input []string) []string {
-	// Make a copy of input.
-	ss := append(make([]string, 0, len(input)), input...)
-	sort.Sort(byLength(ss))
-
-	for i, shortString := range ss {
-		// For each string, only consider strings higher than it in sort
-		// order, i.e. of equal length or longer.
-		for _, longString := range ss[i+1:] {
-			if strings.Contains(longString, shortString) {
-				ss[i] = ""
-				break
-			}
-		}
-	}
-
-	// Remove the empty strings.
-	sort.Strings(ss)
-	for len(ss) > 0 && ss[0] == "" {
-		ss = ss[1:]
-	}
-	return ss
-}
-
-// crush combines a list of strings, taking advantage of overlaps. It returns a
-// single string that contains each input string as a substring.
-func crush(ss []string) string {
-	maxLabelLen := 0
-	for _, s := range ss {
-		if maxLabelLen < len(s) {
-			maxLabelLen = len(s)
-		}
-	}
-
-	for prefixLen := maxLabelLen; prefixLen > 0; prefixLen-- {
-		prefixes := makePrefixMap(ss, prefixLen)
-		for i, s := range ss {
-			if len(s) <= prefixLen {
-				continue
-			}
-			mergeLabel(ss, i, prefixLen, prefixes)
-		}
-	}
-
-	return strings.Join(ss, "")
-}
-
-// mergeLabel merges the label at ss[i] with the first available matching label
-// in prefixMap, where the last "prefixLen" characters in ss[i] match the first
-// "prefixLen" characters in the matching label.
-// It will merge ss[i] repeatedly until no more matches are available.
-// All matching labels merged into ss[i] are replaced by "".
-func mergeLabel(ss []string, i, prefixLen int, prefixes prefixMap) {
-	s := ss[i]
-	suffix := s[len(s)-prefixLen:]
-	for _, j := range prefixes[suffix] {
-		// Empty strings mean "already used." Also avoid merging with self.
-		if ss[j] == "" || i == j {
-			continue
-		}
-		if *v {
-			fmt.Fprintf(os.Stderr, "%d-length overlap at (%4d,%4d): %q and %q share %q\n",
-				prefixLen, i, j, ss[i], ss[j], suffix)
-		}
-		ss[i] += ss[j][prefixLen:]
-		ss[j] = ""
-		// ss[i] has a new suffix, so merge again if possible.
-		// Note: we only have to merge again at the same prefix length. Shorter
-		// prefix lengths will be handled in the next iteration of crush's for loop.
-		// Can there be matches for longer prefix lengths, introduced by the merge?
-		// I believe that any such matches would by necessity have been eliminated
-		// during substring removal or merged at a higher prefix length. For
-		// instance, in crush("abc", "cde", "bcdef"), combining "abc" and "cde"
-		// would yield "abcde", which could be merged with "bcdef."
However, in - // practice "cde" would already have been elimintated by removeSubstrings. - mergeLabel(ss, i, prefixLen, prefixes) - return - } -} - -// prefixMap maps from a prefix to a list of strings containing that prefix. The -// list of strings is represented as indexes into a slice of strings stored -// elsewhere. -type prefixMap map[string][]int - -// makePrefixMap constructs a prefixMap from a slice of strings. -func makePrefixMap(ss []string, prefixLen int) prefixMap { - prefixes := make(prefixMap) - for i, s := range ss { - // We use < rather than <= because if a label matches on a prefix equal to - // its full length, that's actually a substring match handled by - // removeSubstrings. - if prefixLen < len(s) { - prefix := s[:prefixLen] - prefixes[prefix] = append(prefixes[prefix], i) - } - } - - return prefixes -} From 8b93700d99a9e7e4bfe66190850af27eca5d90f7 Mon Sep 17 00:00:00 2001 From: Deshi Xiao Date: Wed, 28 Feb 2018 06:46:06 +0800 Subject: [PATCH 088/276] fix out of date cli arg for registry version Signed-off-by: Deshi Xiao --- BUILDING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/BUILDING.md b/BUILDING.md index c5982818..2981d016 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -108,7 +108,7 @@ directory. This includes formatting, vetting, linting, building, testing and generating tagged binaries. We can verify this worked by running the registry binary generated in the "./bin" directory: - $ ./bin/registry -version + $ ./bin/registry --version ./bin/registry github.com/docker/distribution v2.0.0-alpha.2-80-g16d8b2c.m ### Optional build tags From f2805894c828f40f793abe34f187a1316505d4b5 Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Thu, 1 Mar 2018 11:25:05 -0800 Subject: [PATCH 089/276] Fix minor "Challanges" typo Signed-off-by: Tianon Gravi --- registry/client/auth/challenge/authchallenge.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/registry/client/auth/challenge/authchallenge.go b/registry/client/auth/challenge/authchallenge.go index c9bdfc35..6e3f1ccc 100644 --- a/registry/client/auth/challenge/authchallenge.go +++ b/registry/client/auth/challenge/authchallenge.go @@ -45,13 +45,13 @@ type Manager interface { // to a backend. 
func NewSimpleManager() Manager { return &simpleManager{ - Challanges: make(map[string][]Challenge), + Challenges: make(map[string][]Challenge), } } type simpleManager struct { sync.RWMutex - Challanges map[string][]Challenge + Challenges map[string][]Challenge } func normalizeURL(endpoint *url.URL) { @@ -64,7 +64,7 @@ func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) { m.RLock() defer m.RUnlock() - challenges := m.Challanges[endpoint.String()] + challenges := m.Challenges[endpoint.String()] return challenges, nil } @@ -82,7 +82,7 @@ func (m *simpleManager) AddResponse(resp *http.Response) error { m.Lock() defer m.Unlock() - m.Challanges[urlCopy.String()] = challenges + m.Challenges[urlCopy.String()] = challenges return nil } From fc1d3647c6a148a0dbab2c6c4c20a2c1b7874bf2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?elsanli=28=E6=9D=8E=E6=A5=A0=29?= Date: Thu, 18 Jan 2018 18:26:54 +0800 Subject: [PATCH 090/276] Added ignore event typs into notifications MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: elsanli(李楠) --- configuration/configuration.go | 7 ++++++ configuration/configuration_test.go | 14 +++++++++++ docs/configuration.md | 18 ++++++++++++++ notifications/endpoint.go | 5 +++- notifications/sinks.go | 37 +++++++++++++++++++++-------- notifications/sinks_test.go | 22 ++++++++++------- registry/handlers/app.go | 1 + 7 files changed, 84 insertions(+), 20 deletions(-) diff --git a/configuration/configuration.go b/configuration/configuration.go index cdc996b9..708c4b51 100644 --- a/configuration/configuration.go +++ b/configuration/configuration.go @@ -552,6 +552,13 @@ type Endpoint struct { Threshold int `yaml:"threshold"` // circuit breaker threshold before backing off on failure Backoff time.Duration `yaml:"backoff"` // backoff duration IgnoredMediaTypes []string `yaml:"ignoredmediatypes"` // target media types to ignore + Ignore Ignore `yaml:"ignore"` // ignore event types +} + +//Ignore configures mediaTypes and actions of the event, that it won't be propagated +type Ignore struct { + MediaTypes []string `yaml:"mediatypes"` // target media types to ignore + Actions []string `yaml:"actions"` // ignore action types } // Reporting defines error reporting methods. 
diff --git a/configuration/configuration_test.go b/configuration/configuration_test.go index 3e1583dd..a8ec640e 100644 --- a/configuration/configuration_test.go +++ b/configuration/configuration_test.go @@ -63,6 +63,10 @@ var configStruct = Configuration{ "Authorization": []string{"Bearer "}, }, IgnoredMediaTypes: []string{"application/octet-stream"}, + Ignore: Ignore{ + MediaTypes: []string{"application/octet-stream"}, + Actions: []string{"pull"}, + }, }, }, }, @@ -142,6 +146,11 @@ notifications: Authorization: [Bearer ] ignoredmediatypes: - application/octet-stream + ignore: + mediatypes: + - application/octet-stream + actions: + - pull reporting: bugsnag: apikey: BugsnagApiKey @@ -170,6 +179,11 @@ notifications: Authorization: [Bearer ] ignoredmediatypes: - application/octet-stream + ignore: + mediatypes: + - application/octet-stream + actions: + - pull http: headers: X-Content-Type-Options: [nosniff] diff --git a/docs/configuration.md b/docs/configuration.md index 807353cc..3eacfc50 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -232,6 +232,11 @@ notifications: backoff: 1s ignoredmediatypes: - application/octet-stream + ignore: + mediatypes: + - application/octet-stream + actions: + - pull redis: addr: localhost:6379 password: asecret @@ -837,6 +842,11 @@ notifications: backoff: 1s ignoredmediatypes: - application/octet-stream + ignore: + mediatypes: + - application/octet-stream + actions: + - pull ``` The notifications option is **optional** and currently may contain a single @@ -857,6 +867,14 @@ accept event notifications. | `threshold` | yes | An integer specifying how long to wait before backing off a failure. | | `backoff` | yes | How long the system backs off before retrying after a failure. A positive integer and an optional suffix indicating the unit of time, which may be `ns`, `us`, `ms`, `s`, `m`, or `h`. If you omit the unit of time, `ns` is used. | | `ignoredmediatypes`|no| A list of target media types to ignore. Events with these target media types are not published to the endpoint. | +| `ignore` |no| Events with these mediatypes or actions are not published to the endpoint. | + +#### `ignore` +| Parameter | Required | Description | +|-----------|----------|-------------------------------------------------------| +| `mediatypes`|no| A list of target media types to ignore. Events with these target media types are not published to the endpoint. | +| `actions` |no| A list of actions to ignore. Events with these actions are not published to the endpoint. | + ## `redis` diff --git a/notifications/endpoint.go b/notifications/endpoint.go index 44d0f6d7..285995ec 100644 --- a/notifications/endpoint.go +++ b/notifications/endpoint.go @@ -1,6 +1,7 @@ package notifications import ( + "github.com/docker/distribution/configuration" "net/http" "time" ) @@ -14,6 +15,7 @@ type EndpointConfig struct { Backoff time.Duration IgnoredMediaTypes []string Transport *http.Transport `json:"-"` + Ignore configuration.Ignore } // defaults set any zero-valued fields to a reasonable default. @@ -63,7 +65,8 @@ func NewEndpoint(name, url string, config EndpointConfig) *Endpoint { endpoint.Transport, endpoint.metrics.httpStatusListener()) endpoint.Sink = newRetryingSink(endpoint.Sink, endpoint.Threshold, endpoint.Backoff) endpoint.Sink = newEventQueue(endpoint.Sink, endpoint.metrics.eventQueueListener()) - endpoint.Sink = newIgnoredMediaTypesSink(endpoint.Sink, config.IgnoredMediaTypes) + mediaTypes := append(config.Ignore.MediaTypes, config.IgnoredMediaTypes...) 
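+	// The merged list feeds newIgnoredSink below, so the pre-existing
+	// ignoredmediatypes option and the new ignore.mediatypes list both
+	// take effect.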
+	endpoint.Sink = newIgnoredSink(endpoint.Sink, mediaTypes, config.Ignore.Actions)
 
 	register(&endpoint)
 	return &endpoint
diff --git a/notifications/sinks.go b/notifications/sinks.go
index e1523df7..14b692a3 100644
--- a/notifications/sinks.go
+++ b/notifications/sinks.go
@@ -210,14 +210,15 @@ func (eq *eventQueue) next() []Event {
 	return block
 }
 
-// ignoredMediaTypesSink discards events with ignored target media types and
+// ignoredSink discards events with ignored target media types and actions, and
 // passes the rest along.
-type ignoredMediaTypesSink struct {
+type ignoredSink struct {
 	Sink
-	ignored map[string]bool
+	ignoreMediaTypes map[string]bool
+	ignoreActions    map[string]bool
 }
 
-func newIgnoredMediaTypesSink(sink Sink, ignored []string) Sink {
+func newIgnoredSink(sink Sink, ignored []string, ignoreActions []string) Sink {
 	if len(ignored) == 0 {
 		return sink
 	}
@@ -227,25 +228,41 @@
 		ignoredMap[mediaType] = true
 	}
 
-	return &ignoredMediaTypesSink{
-		Sink:    sink,
-		ignored: ignoredMap,
+	ignoredActionsMap := make(map[string]bool)
+	for _, action := range ignoreActions {
+		ignoredActionsMap[action] = true
+	}
+
+	return &ignoredSink{
+		Sink:             sink,
+		ignoreMediaTypes: ignoredMap,
+		ignoreActions:    ignoredActionsMap,
 	}
 }
 
 // Write discards events with ignored target media types and passes the rest
 // along.
-func (imts *ignoredMediaTypesSink) Write(events ...Event) error {
+func (imts *ignoredSink) Write(events ...Event) error {
 	var kept []Event
 	for _, e := range events {
-		if !imts.ignored[e.Target.MediaType] {
+		if !imts.ignoreMediaTypes[e.Target.MediaType] {
 			kept = append(kept, e)
 		}
 	}
 	if len(kept) == 0 {
 		return nil
 	}
-	return imts.Sink.Write(kept...)
+
+	var results []Event
+	for _, e := range kept {
+		if !imts.ignoreActions[e.Action] {
+			results = append(results, e)
+		}
+	}
+	if len(results) == 0 {
+		return nil
+	}
+	return imts.Sink.Write(results...)
} // retryingSink retries the write until success or an ErrSinkClosed is diff --git a/notifications/sinks_test.go b/notifications/sinks_test.go index 2a85c4f4..9613fe8a 100644 --- a/notifications/sinks_test.go +++ b/notifications/sinks_test.go @@ -113,25 +113,29 @@ func TestEventQueue(t *testing.T) { } } -func TestIgnoredMediaTypesSink(t *testing.T) { +func TestIgnoredSink(t *testing.T) { blob := createTestEvent("push", "library/test", "blob") - manifest := createTestEvent("push", "library/test", "manifest") + manifest := createTestEvent("pull", "library/test", "manifest") type testcase struct { - ignored []string - expected []Event + ignoreMediaTypes []string + ignoreActions []string + expected []Event } cases := []testcase{ - {nil, []Event{blob, manifest}}, - {[]string{"other"}, []Event{blob, manifest}}, - {[]string{"blob"}, []Event{manifest}}, - {[]string{"blob", "manifest"}, nil}, + {nil, nil, []Event{blob, manifest}}, + {[]string{"other"}, []string{"other"}, []Event{blob, manifest}}, + {[]string{"blob"}, []string{"other"}, []Event{manifest}}, + {[]string{"blob", "manifest"}, []string{"other"}, nil}, + {[]string{"other"}, []string{"push"}, []Event{manifest}}, + {[]string{"other"}, []string{"pull"}, []Event{blob}}, + {[]string{"other"}, []string{"pull", "push"}, nil}, } for _, c := range cases { ts := &testSink{} - s := newIgnoredMediaTypesSink(ts, c.ignored) + s := newIgnoredSink(ts, c.ignoreMediaTypes, c.ignoreActions) if err := s.Write(blob, manifest); err != nil { t.Fatalf("error writing event: %v", err) diff --git a/registry/handlers/app.go b/registry/handlers/app.go index e38050cb..0243dd5e 100644 --- a/registry/handlers/app.go +++ b/registry/handlers/app.go @@ -439,6 +439,7 @@ func (app *App) configureEvents(configuration *configuration.Configuration) { Backoff: endpoint.Backoff, Headers: endpoint.Headers, IgnoredMediaTypes: endpoint.IgnoredMediaTypes, + Ignore: endpoint.Ignore, }) sinks = append(sinks, endpoint) From 492844e09c8b0a57d11071a527449c6e0d26f047 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michal=20Min=C3=A1=C5=99?= Date: Tue, 13 Mar 2018 15:23:47 +0100 Subject: [PATCH 091/276] redis: delete right size attribute MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Michal Minář --- registry/storage/cache/redis/redis.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/registry/storage/cache/redis/redis.go b/registry/storage/cache/redis/redis.go index 36c6ac60..550d703b 100644 --- a/registry/storage/cache/redis/redis.go +++ b/registry/storage/cache/redis/redis.go @@ -72,7 +72,7 @@ func (rbds *redisBlobDescriptorService) Clear(ctx context.Context, dgst digest.D defer conn.Close() // Not atomic in redis <= 2.3 - reply, err := conn.Do("HDEL", rbds.blobDescriptorHashKey(dgst), "digest", "length", "mediatype") + reply, err := conn.Do("HDEL", rbds.blobDescriptorHashKey(dgst), "digest", "size", "mediatype") if err != nil { return err } From fb7b0ddfc3aad9abf55247095e913e218f4cd5dc Mon Sep 17 00:00:00 2001 From: Justin Cormack Date: Mon, 26 Mar 2018 15:46:07 -0700 Subject: [PATCH 092/276] Remove gorilla/context from vendor.conf This has been unused since we switched to Go context. 
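The request-scoped values it used to carry now ride on the standard
library context instead; a rough sketch of the replacement pattern
(the key type and helper names below are illustrative, not code from
this repository):

package main

import (
	"context"
	"fmt"
	"net/http"
)

// key is unexported to avoid collisions, the same trick the deleted
// gorilla/context docs recommend for its map keys.
type key int

const myKey key = 0

// set attaches a value to the request's own context instead of a
// package-global map keyed by *http.Request.
func set(r *http.Request, val string) *http.Request {
	return r.WithContext(context.WithValue(r.Context(), myKey, val))
}

func handler(w http.ResponseWriter, r *http.Request) {
	// The value is released with the request; there is no
	// Clear()/ClearHandler() bookkeeping to forget.
	val, _ := r.Context().Value(myKey).(string)
	fmt.Fprintln(w, val)
}

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		handler(w, set(r, "bar"))
	})
	http.ListenAndServe(":8080", nil)
}
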
Signed-off-by: Justin Cormack --- vendor.conf | 3 +- vendor/github.com/gorilla/context/LICENSE | 27 ---- vendor/github.com/gorilla/context/README.md | 7 - vendor/github.com/gorilla/context/context.go | 143 ------------------- vendor/github.com/gorilla/context/doc.go | 82 ----------- 5 files changed, 1 insertion(+), 261 deletions(-) delete mode 100644 vendor/github.com/gorilla/context/LICENSE delete mode 100644 vendor/github.com/gorilla/context/README.md delete mode 100644 vendor/github.com/gorilla/context/context.go delete mode 100644 vendor/github.com/gorilla/context/doc.go diff --git a/vendor.conf b/vendor.conf index 504c4191..d0ebadf8 100644 --- a/vendor.conf +++ b/vendor.conf @@ -15,7 +15,6 @@ github.com/docker/libtrust fa567046d9b14f6aa788882a950d69651d230b21 github.com/garyburd/redigo 535138d7bcd717d6531c701ef5933d98b1866257 github.com/go-ini/ini 2ba15ac2dc9cdf88c110ec2dc0ced7fa45f5678c github.com/golang/protobuf 8d92cf5fc15a4382f8964b08e1f42a75c0591aa3 -github.com/gorilla/context 14f550f51af52180c2eefed15e5fd18d63c0a64a github.com/gorilla/handlers 60c7bfde3e33c201519a200a4507a158cc03a17b github.com/gorilla/mux 599cba5e7b6137d46ddf58fb1765f5d928e69604 github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 @@ -47,4 +46,4 @@ gopkg.in/check.v1 64131543e7896d5bcc6bd5a76287eb75ea96c673 gopkg.in/square/go-jose.v1 40d457b439244b546f023d056628e5184136899b gopkg.in/yaml.v2 bef53efd0c76e49e6de55ead051f886bea7e9420 rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git -github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb \ No newline at end of file +github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb diff --git a/vendor/github.com/gorilla/context/LICENSE b/vendor/github.com/gorilla/context/LICENSE deleted file mode 100644 index 0e5fb872..00000000 --- a/vendor/github.com/gorilla/context/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 Rodrigo Moraes. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/gorilla/context/README.md b/vendor/github.com/gorilla/context/README.md deleted file mode 100644 index c60a31b0..00000000 --- a/vendor/github.com/gorilla/context/README.md +++ /dev/null @@ -1,7 +0,0 @@ -context -======= -[![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context) - -gorilla/context is a general purpose registry for global request variables. - -Read the full documentation here: http://www.gorillatoolkit.org/pkg/context diff --git a/vendor/github.com/gorilla/context/context.go b/vendor/github.com/gorilla/context/context.go deleted file mode 100644 index 81cb128b..00000000 --- a/vendor/github.com/gorilla/context/context.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package context - -import ( - "net/http" - "sync" - "time" -) - -var ( - mutex sync.RWMutex - data = make(map[*http.Request]map[interface{}]interface{}) - datat = make(map[*http.Request]int64) -) - -// Set stores a value for a given key in a given request. -func Set(r *http.Request, key, val interface{}) { - mutex.Lock() - if data[r] == nil { - data[r] = make(map[interface{}]interface{}) - datat[r] = time.Now().Unix() - } - data[r][key] = val - mutex.Unlock() -} - -// Get returns a value stored for a given key in a given request. -func Get(r *http.Request, key interface{}) interface{} { - mutex.RLock() - if ctx := data[r]; ctx != nil { - value := ctx[key] - mutex.RUnlock() - return value - } - mutex.RUnlock() - return nil -} - -// GetOk returns stored value and presence state like multi-value return of map access. -func GetOk(r *http.Request, key interface{}) (interface{}, bool) { - mutex.RLock() - if _, ok := data[r]; ok { - value, ok := data[r][key] - mutex.RUnlock() - return value, ok - } - mutex.RUnlock() - return nil, false -} - -// GetAll returns all stored values for the request as a map. Nil is returned for invalid requests. -func GetAll(r *http.Request) map[interface{}]interface{} { - mutex.RLock() - if context, ok := data[r]; ok { - result := make(map[interface{}]interface{}, len(context)) - for k, v := range context { - result[k] = v - } - mutex.RUnlock() - return result - } - mutex.RUnlock() - return nil -} - -// GetAllOk returns all stored values for the request as a map and a boolean value that indicates if -// the request was registered. -func GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) { - mutex.RLock() - context, ok := data[r] - result := make(map[interface{}]interface{}, len(context)) - for k, v := range context { - result[k] = v - } - mutex.RUnlock() - return result, ok -} - -// Delete removes a value stored for a given key in a given request. -func Delete(r *http.Request, key interface{}) { - mutex.Lock() - if data[r] != nil { - delete(data[r], key) - } - mutex.Unlock() -} - -// Clear removes all values stored for a given request. -// -// This is usually called by a handler wrapper to clean up request -// variables at the end of a request lifetime. See ClearHandler(). -func Clear(r *http.Request) { - mutex.Lock() - clear(r) - mutex.Unlock() -} - -// clear is Clear without the lock. -func clear(r *http.Request) { - delete(data, r) - delete(datat, r) -} - -// Purge removes request data stored for longer than maxAge, in seconds. -// It returns the amount of requests removed. -// -// If maxAge <= 0, all request data is removed. 
-// -// This is only used for sanity check: in case context cleaning was not -// properly set some request data can be kept forever, consuming an increasing -// amount of memory. In case this is detected, Purge() must be called -// periodically until the problem is fixed. -func Purge(maxAge int) int { - mutex.Lock() - count := 0 - if maxAge <= 0 { - count = len(data) - data = make(map[*http.Request]map[interface{}]interface{}) - datat = make(map[*http.Request]int64) - } else { - min := time.Now().Unix() - int64(maxAge) - for r := range data { - if datat[r] < min { - clear(r) - count++ - } - } - } - mutex.Unlock() - return count -} - -// ClearHandler wraps an http.Handler and clears request values at the end -// of a request lifetime. -func ClearHandler(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - defer Clear(r) - h.ServeHTTP(w, r) - }) -} diff --git a/vendor/github.com/gorilla/context/doc.go b/vendor/github.com/gorilla/context/doc.go deleted file mode 100644 index 73c74003..00000000 --- a/vendor/github.com/gorilla/context/doc.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package context stores values shared during a request lifetime. - -For example, a router can set variables extracted from the URL and later -application handlers can access those values, or it can be used to store -sessions values to be saved at the end of a request. There are several -others common uses. - -The idea was posted by Brad Fitzpatrick to the go-nuts mailing list: - - http://groups.google.com/group/golang-nuts/msg/e2d679d303aa5d53 - -Here's the basic usage: first define the keys that you will need. The key -type is interface{} so a key can be of any type that supports equality. -Here we define a key using a custom int type to avoid name collisions: - - package foo - - import ( - "github.com/gorilla/context" - ) - - type key int - - const MyKey key = 0 - -Then set a variable. Variables are bound to an http.Request object, so you -need a request instance to set a value: - - context.Set(r, MyKey, "bar") - -The application can later access the variable using the same key you provided: - - func MyHandler(w http.ResponseWriter, r *http.Request) { - // val is "bar". - val := context.Get(r, foo.MyKey) - - // returns ("bar", true) - val, ok := context.GetOk(r, foo.MyKey) - // ... - } - -And that's all about the basic usage. We discuss some other ideas below. - -Any type can be stored in the context. To enforce a given type, make the key -private and wrap Get() and Set() to accept and return values of a specific -type: - - type key int - - const mykey key = 0 - - // GetMyKey returns a value for this package from the request values. - func GetMyKey(r *http.Request) SomeType { - if rv := context.Get(r, mykey); rv != nil { - return rv.(SomeType) - } - return nil - } - - // SetMyKey sets a value for this package in the request values. - func SetMyKey(r *http.Request, val SomeType) { - context.Set(r, mykey, val) - } - -Variables must be cleared at the end of a request, to remove all values -that were stored. This can be done in an http.Handler, after a request was -served. Just call Clear() passing the request: - - context.Clear(r) - -...or use ClearHandler(), which conveniently wraps an http.Handler to clear -variables at the end of a request lifetime. 
- -The Routers from the packages gorilla/mux and gorilla/pat call Clear() -so if you are using either of them you don't need to clear the context manually. -*/ -package context From b4dd9b43763ad8073e7302bc10585f4d1e227dc2 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Mon, 21 May 2018 16:00:48 -0700 Subject: [PATCH 093/276] Update certificates Set expiration to 10 years Signed-off-by: Derek McGowan --- .../malevolent-certs/ca.pem | 32 ++++---- .../malevolent-certs/localregistry.cert | 34 ++++----- .../malevolent-certs/localregistry.key | 50 ++++++------- .../nginx/ssl/registry-ca+ca.pem | 43 ++++------- .../nginx/ssl/registry-ca+client-cert.pem | 43 ++++------- .../nginx/ssl/registry-ca+client-key.pem | 74 +++++++------------ .../nginx/ssl/registry-ca+localhost-cert.pem | 44 +++++------ .../nginx/ssl/registry-ca+localhost-key.pem | 74 +++++++------------ .../ssl/registry-ca+localregistry-cert.pem | 45 +++++------ .../ssl/registry-ca+localregistry-key.pem | 74 +++++++------------ .../nginx/ssl/registry-noca+client-cert.pem | 43 ++++------- .../nginx/ssl/registry-noca+client-key.pem | 74 +++++++------------ .../ssl/registry-noca+localhost-cert.pem | 44 +++++------ .../nginx/ssl/registry-noca+localhost-key.pem | 74 +++++++------------ .../ssl/registry-noca+localregistry-cert.pem | 45 +++++------ .../ssl/registry-noca+localregistry-key.pem | 74 +++++++------------ 16 files changed, 324 insertions(+), 543 deletions(-) diff --git a/contrib/docker-integration/malevolent-certs/ca.pem b/contrib/docker-integration/malevolent-certs/ca.pem index 93325986..b1675ab9 100644 --- a/contrib/docker-integration/malevolent-certs/ca.pem +++ b/contrib/docker-integration/malevolent-certs/ca.pem @@ -1,18 +1,18 @@ -----BEGIN CERTIFICATE----- -MIIC9TCCAd+gAwIBAgIQKQTGjKpSVBW78ef0fOcxRTALBgkqhkiG9w0BAQswJjER -MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDgyMDIz -MjE0OVoXDTE4MDgwNDIzMjE0OVowJjERMA8GA1UEChMIUXVpY2tUTFMxETAPBgNV -BAMTCFF1aWNrVExTMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwoPM -xiDZK6Fwy5r3waRkfJHhyZZH828Jyj+nz5UVkMyOM/xN6MgJ2w911hTj1wSXG2n3 -AohF3gTFNrDYh4j2qRZnixDrOM5GBm2/KJbyfBIYkrR45yLfjidO7MRnhaPZ5Fov -l+RKwNBXP4Q2mUe7q9FM457Rm8hAcqXP04AJT20m1QSYQivDgxsDxuAQte3VEy1E -0j0CwUKoFHT6MHOnDPEZbc4r1+ba34WBM1Sc5KXyV2JlbtU07J4hACYWVsD7vQCl -VFlZNE4E35ahMDZ+ODLal9PAT8ARLdAtjvRWrT+h8qZ4Yfwt/sGF1K4CAkTP3H5p -uMkJG56zmqIEYeHMuwIDAQABoyMwITAOBgNVHQ8BAf8EBAMCAKQwDwYDVR0TAQH/ -BAUwAwEB/zALBgkqhkiG9w0BAQsDggEBALpieTckiPEeb3rTAWl7waDPLPOIhS5C -XHVfOm7cPmRn3pT2VuR8y74U7a1uOkYMgJnCWb8lSXhbqC89FatLnAhKqo4I9oD8 -2BXgYeIpP5/OWBcjzmsMnowrvokc0chAmAR0Ux6AP0eX9amC0lGMuTHdw3+is0AR -lhoImOUPXvgMH7W2RimpSgnX0R5wKqfuGwMfbGa0xhWBZ+wekAKcU8b+pIHDyX0c -EQcir2y8/lVjECXSAIlV6iasPQ3hm1sd0xq1hx4yrwYFvQb7yEhOXbK24HLr/20D -RRmEOuS8gg2XtUFv66z/VOw/nUleIg9GAuWDJaiu9frmIma4/tIY4qY= +MIIC+TCCAeGgAwIBAgIQJMzVQNYVNTbh36kZUytWiDANBgkqhkiG9w0BAQsFADAm +MREwDwYDVQQKEwhRdWlja1RMUzERMA8GA1UEAxMIUXVpY2tUTFMwHhcNMTgwNTIx +MjI1OTA2WhcNMjgwODI2MjI1OTA2WjAmMREwDwYDVQQKEwhRdWlja1RMUzERMA8G +A1UEAxMIUXVpY2tUTFMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCe +8rEU8xHh6BMYVRz/KhFftKSxS4dxJi2LoNN4fxzY6EgHNfBACt2MhIWaUSHf2YkF +NsS/T7qZWq23NEuIJYUUwbJRAh/iQsEhCI56eV+aJX+DGd2SQQNKdx1Pt528LNws +n8Ci8rEHTe6i2/U7n/DLqa32BWF3aShsVrchRgpizXezS7GLyFmhv0hi0zRKJgDG +JebLeqe/BUtEOsS/Oa65NQTEO/5EZBzM74+4eRo5zyp9Uvw4edmOrXRXK1fK9gP3 +Fq/jz9+8b5eUd9vl0e9z/xTqMdicYZOUHuUtxM3hXAkkxcaVJqqqDe6URbJHpbaN +8Vt/p/csFXMWj3oSokvDAgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMB +Af8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQCC3NiX+2Qk3WB+TRNDPCtQ7Pw+ 
+o31SSqfF8m3fevT4mdrJqFAF4qUpDwgV9/9EkU4UBoIq03S91Dk/No0jR3VAzzRA +h3+ul/7u08JriS/ZgVediodi7H8xeCz3nvZfAwCP2ZmHzDGp39Uhc3L3WFZImZuV +fCDeSWF3c5CjJbdUuCYYFy6LwSFLPoBXZaNBL19XP9btJtjbNTm77PZJ4cELTQ+U +r5Ofw9D9mCCYrapmprw7Fw9wdE+iLL9EJCHAj7L8UYshF4+7O7Jv3ZatySMWPbjS +nIa2+eKl/sfvRvLZWV9dUSObVsm/bpv8bsHIKp4bYl+IDb2aoSWnw4eZQHDJ -----END CERTIFICATE----- diff --git a/contrib/docker-integration/malevolent-certs/localregistry.cert b/contrib/docker-integration/malevolent-certs/localregistry.cert index 071e7a2b..cbb5f2ff 100644 --- a/contrib/docker-integration/malevolent-certs/localregistry.cert +++ b/contrib/docker-integration/malevolent-certs/localregistry.cert @@ -1,19 +1,19 @@ -----BEGIN CERTIFICATE----- -MIIDETCCAfugAwIBAgIQZRKt7OeG+TlC2riszYwQQTALBgkqhkiG9w0BAQswJjER -MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDgyMDIz -MjE0OVoXDTE4MDgwNDIzMjE0OVowKzERMA8GA1UEChMIUXVpY2tUTFMxFjAUBgNV -BAMTDWxvY2FscmVnaXN0cnkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB -AQDPdsUBStNMz4coXfQVIJIafG85VkngM4fV7hrg7AbiGLCWvq8cWOrYM50G9Wmo -twK1WeQ6bigYOjINgSfTxcy3adciVZIIJyXqboz6n2V0yRPWpakof939bvuAurAP -tSqQ2V5fGN0ZZn4J4IbXMSovKwo7sG3X6i4q/8DYHZ/mKjvCRMPC3MGWqunknpkm -dzyKbIFHaDKlAqIOwTsDhHvGzm/9n3D+h4sl5ZPBobuBEV2u5GR0H5ujak4+Kczt -thCWtRkzCfnjW0TEanheSYJGu8OgCGoFjQnHotgqvOO6iHZCsrB3gf8WQeou+y9e -+OyLZv3FmqdC9SXr3b0LGQTFAgMBAAGjOjA4MA4GA1UdDwEB/wQEAwIAoDAMBgNV -HRMBAf8EAjAAMBgGA1UdEQQRMA+CDWxvY2FscmVnaXN0cnkwCwYJKoZIhvcNAQEL -A4IBAQC/PP2Y9QVhO8t4BXML1QpNRWqXG8Gg0P1XIh6M6FoxcGIodLdbzui828YB -wm9ZlyKars+nDdgLdQWawdV7hSd6s2NeQlHYQSGLsdTAVkgIxiD7D2Tw3kAZ6Zrj -dPikoVAc+rBMm/BXQLzy95IAbBVOHOpBkOOgF+TYxeLnOc3GzbUqBi1Pq97DMaxr -DaDuywH55P/6v7qt610UIsZ6+RZ78iiRx4Q+oRxEqGT0rXI76gVxOFabbJuFr1n1 -kEWa3u/BssJzX3KVAm7oUtaBnj2SH5fokFmvZ5lBXA4QO/5doOa8yZiFFvvQs7EY -SWDxLrvS33UCtsCcpPggjehnxKaC +MIIDFTCCAf2gAwIBAgIQfv/raCIVnmpXY74aUyohmDANBgkqhkiG9w0BAQsFADAm +MREwDwYDVQQKEwhRdWlja1RMUzERMA8GA1UEAxMIUXVpY2tUTFMwHhcNMTgwNTIx +MjI1OTA2WhcNMjgwODI2MjI1OTA2WjArMREwDwYDVQQKEwhRdWlja1RMUzEWMBQG +A1UEAxMNbG9jYWxyZWdpc3RyeTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBALedGn6gB0Km693mvJ8yz89wtfDs+SGjJi+XmJv0PYe6j5uToXQH2naXXIOZ +lT9lmXd/RciZwn50aK4T6alu96D8yeLE13P+75rdrI9DWTNHsfx0jwRxUEXNazPI +5Knwbf2MgGJfvHE6LjQ3FStJJ9f8JzryspIAYy5PJETuzoF7GsrUhgmcgQNqQcIx +d81QwOnW3EHastTPIbUxQ3cbEKZMVmvsYSY60pQuw/syN7vGcR/uJQ6HsCUWTEpk +LWFNJYudYnRIJ/mb6bGJ0tJhdlXKQ9+89oiEWZp9p1KMfyXesp8HeW8Jyoa06+Ri +5U82r0oQgC0MI5AueueoNOmQyGsCAwEAAaM6MDgwDgYDVR0PAQH/BAQDAgWgMAwG +A1UdEwEB/wQCMAAwGAYDVR0RBBEwD4INbG9jYWxyZWdpc3RyeTANBgkqhkiG9w0B +AQsFAAOCAQEAGgUESvQoD/QGZQlY2NA4sauad/yMHVo7vs5TLiKxnAfJrnP1ycD6 +sqcbwCu6B1GU7fqGjKKgzXWXHTi4MiLi5bnh5Y2JBTABksGmzNAU1LbQJJkwsPnE +GBF0RgUmcw7a+4qu3TqPJABOsl+RiUQ4VDzP3DFRbyigs2li+SjLTJepahDhAke9 +11lU/r3pm1cov9m0AsKSHrU777Hv5B7gmyJ1FO1Os7/KnkdHKUwiIZx0VW6Ho5H+ +IiCH7iKJ1tTxe3nkwjlkSXnx7xiLOG7QK1LtTNHzBumF4COSF1kvWvIqNhJeg482 +e38+Kzctl5iVbrB+JWY6roTQ26VLIdlS7A== -----END CERTIFICATE----- diff --git a/contrib/docker-integration/malevolent-certs/localregistry.key b/contrib/docker-integration/malevolent-certs/localregistry.key index c5bf7ac1..90e5b798 100644 --- a/contrib/docker-integration/malevolent-certs/localregistry.key +++ b/contrib/docker-integration/malevolent-certs/localregistry.key @@ -1,27 +1,27 @@ -----BEGIN RSA PRIVATE KEY----- -MIIEpQIBAAKCAQEAz3bFAUrTTM+HKF30FSCSGnxvOVZJ4DOH1e4a4OwG4hiwlr6v -HFjq2DOdBvVpqLcCtVnkOm4oGDoyDYEn08XMt2nXIlWSCCcl6m6M+p9ldMkT1qWp -KH/d/W77gLqwD7UqkNleXxjdGWZ+CeCG1zEqLysKO7Bt1+ouKv/A2B2f5io7wkTD -wtzBlqrp5J6ZJnc8imyBR2gypQKiDsE7A4R7xs5v/Z9w/oeLJeWTwaG7gRFdruRk 
-dB+bo2pOPinM7bYQlrUZMwn541tExGp4XkmCRrvDoAhqBY0Jx6LYKrzjuoh2QrKw -d4H/FkHqLvsvXvjsi2b9xZqnQvUl6929CxkExQIDAQABAoIBAQCZjCUI7NFwwxQc -m1UAogeglMJZJHUu+9SoUD8Sg34grvdbyqueBm1iMOkiclaOKU1W3b4eRNNmAwRy -nEnW4km+4hX48m5PnHHijYnIIFsd0YjeT+Pf9qtdXFvGjeWq6oIjjM3dAnD50LKu -KsCB2oCHQoqjXNQfftJGvt2C1oI2/WvdOR4prnGXElVfASswX4PkP5LCfLhIx+Fr -7ErfaRIKigLSaAWLKaw3IlL12Q/KkuGcnzYIzIRwY4VJ64ENN6M3+KknfGovQItL -sCxceSe61THDP9AAI3Mequm8z3H0CImOWhJCge5l7ttLLMXZXqGxDCVx+3zvqlCa -X0cgGSVBAoGBAOvTN3oJJx1vnh1mRj8+hqzFq1bjm4T/Wp314QWLeo++43II4uMM -5hxUlO5ViY1sKxQrGwK+9c9ddxAvm5OAFFkzgW9EhDCu0tXUb2/vAJQ93SgqbcRu -coXWJpk0eNW/ouk2s1X8dzs+sCs3a4H64fEEj8yhwoyovjfucspsn7t1AoGBAOE2 -ayLKx7CcWCiD/VGNvP7714MDst2isyq8reg8LEMmAaXR2IWWj5eGwKrImTQCsrjW -P37aBp1lcWuuYRKl/WEGBy6JLNdATyUoYc1Yo+8YdenekkOtOHHJerlK3OKi3ZVp -q4HJY9wzKg/wYLcbTmjjzKj+OBIZWwig73XUHwoRAoGBAJnuIrYbp1aFdvXFvnCl -xY6c8DwlEWx8qY+V4S2XX4bYmOnkdwSxdLplU1lGqCSRyIS/pj/imdyjK4Z7LNfY -sG+RORmB5a9JTgGZSqwLm5snzmXbXA7t8P7/S+6Q25baIeKMe/7SbplTT/bFk/0h -371MtvhhVfYuZwtnL7KFuLXJAoGBAMQ3UHKYsBC8tsZd8Pf8AL07mFHKiC04Etfa -Wb5rpri+RVM+mGITgnmnavehHHHHJAWMjPetZ3P8rSv/Ww4PVsoQoXM3Cr1jh1E9 -dLCfWPz4l8syIscaBYKF4wnLItXGxj3mOgoy93EjlrMaYHlILjGOv4JBM4L5WmoT -JW7IaF6xAoGAZ4K8MwU/cAah8VinMmLGxvWWuBSgTTebuY5zN603MvFLKv5necuc -BZfTTxD+gOnxRT6QAh++tOsbBmsgR9HmTSlQSSgw1L7cwGyXzLCDYw+5K/03KXSU -DaFdgtfcDDJO8WtjOgjyTRzEAOsqFta1ige4pIu5fTilNVMQlhts5Iw= +MIIEpQIBAAKCAQEAt50afqAHQqbr3ea8nzLPz3C18Oz5IaMmL5eYm/Q9h7qPm5Oh +dAfadpdcg5mVP2WZd39FyJnCfnRorhPpqW73oPzJ4sTXc/7vmt2sj0NZM0ex/HSP +BHFQRc1rM8jkqfBt/YyAYl+8cTouNDcVK0kn1/wnOvKykgBjLk8kRO7OgXsaytSG +CZyBA2pBwjF3zVDA6dbcQdqy1M8htTFDdxsQpkxWa+xhJjrSlC7D+zI3u8ZxH+4l +DoewJRZMSmQtYU0li51idEgn+ZvpsYnS0mF2VcpD37z2iIRZmn2nUox/Jd6ynwd5 +bwnKhrTr5GLlTzavShCALQwjkC5656g06ZDIawIDAQABAoIBAQCw7oKJYkucvpyq +x50bCyuVCVdJQhEPiNdTJRG5tjFUiUG4+RmrZaXugQx1A5n97TllHQ9xrjjtAd+d +XzLaQkP8rZsdGfFDpXXeFZ4irxNVhtDMJMVr0oU3vip/TCaMW1Kh8LIGGZrMwPOk +/S849tWeGyzycMwCRL1N8pVQl44G1aexTmlt/tjpGyQAUcGt3MtKaUhhr8mLttfL +2r6wfZgvSqReURBMdn/bf+sMKnJrYnZLRv/iPz+YWhdk4v1OXPO3D4OlYwR8HwSo +a9mOpPuC6lWBqzq8eCBU474aQw4FXaFwN08YkJKa4DqUrmadnd4o+ajvOIA4MdF5 +7OOsHQaBAoGBANcVQIM6vndN2MFwODGnF8RfeLhEf46VlANkZadOOa0/igyra865 +7IR4dREFFkSdte8bj6/iEAPeDzXgS4TRsZfr2gkhdXuc2NW4jTVeiYfWW3cgKfW+ +7BQiHXsXCDeoZ1gXq/F5RmD8ue0TkP+IclWR52AM5e1MzfAuZzaIFNJFAoGBANqL +Q925GxuDamcbuloxQUBarXPJgBDfTWUAXAJVISy80N3av45Y0gyoNjPaU7wHNtU9 +ppnYvM47o1W4qe9AkTtuU79T1WwXFr5T+4Ehm5I8WDHQwkzWGd+WlWkDidLWuvlx +ZkzwQGp3KOTJhO20lpOtCbnOa627Op/zLhCBQzLvAoGAFF4A0+x2KNoIUpkL2TfX +elMIHXrvEVN8xq11KtivgYZozjZVaSgWC51UiJ4Qs8KzfccAXklr9tHKYvGwdQ1e +YeKFrSOr+l6p8eMeDBW9tE1KMAetsYW42Vc5r3RI5OxfjOoA8EbpsTl9acPWkTwc +h5nfbSsLguMpBTt/rpxITHkCgYEAnKwwSBj25P+OXULUkuoytDcNmC+Bnxbm/hyG +2ak78j2eox26LAti8m35Ba1kUCz/01myQSLPIC5DByYutXWdaHTMlyI7o5Td2i6M +5GM6i1i1hWj6kmj+/XqPvEwsFzmXq1HvnAK0u16Xs4UAxgSr2ky35zujmFXcTmTg +xjZU/YMCgYEAqF93h8WfckZxSUUMBgxTkNfu4MJlbsVBzIHv6TJY95VA49RcRYEK +b7Xg+RiNQ42QGd8JBXZ50zQrIDhdd/yJ0KcytvW7WdiEEaF3ANO2QesygmI50611 +R76F8Bj0xnoQUCbyPuMOLRfTwEaS1jBG7TKWQXTaN0fm4DxUU0KazxU= -----END RSA PRIVATE KEY----- diff --git a/contrib/docker-integration/nginx/ssl/registry-ca+ca.pem b/contrib/docker-integration/nginx/ssl/registry-ca+ca.pem index 8c9b1bca..9ff26b41 100644 --- a/contrib/docker-integration/nginx/ssl/registry-ca+ca.pem +++ b/contrib/docker-integration/nginx/ssl/registry-ca+ca.pem @@ -1,29 +1,18 @@ -----BEGIN CERTIFICATE----- -MIIE9TCCAt+gAwIBAgIQMsdPWoLAso/tIOvLk8R/sDALBgkqhkiG9w0BAQswJjER -MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDUyNjIw 
-NTQwMVoXDTE4MDUxMDIwNTQwMVowJjERMA8GA1UEChMIUXVpY2tUTFMxETAPBgNV -BAMTCFF1aWNrVExTMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA1YeX -GTvXPKlWA2lMbCvIGB9JYld/otf8aqs6euVJK1f09ngj5b6VoVlI8o1ScVcHKlKx -BGfPMThnM7fiEmsfDSPuCIlGmTqR0t4t9dHRnLBGbZmR8JdAs7LKpP+PFYu0JTIT -wFcjXIs+45cIF2HpsYY6zkj0bmNsyYmT1U1BTW+qqmhvc0Jkr+ikElOQ93Pn7zIO -cXtxdERdzdzXY5cfL3CCaoJDgXOsKPQfYrCi5Zl6sLZVBkIc6Q2fErSIjTp45+NY -AjiOxfUT0MOFtA0/HzYvVp3gTNPGEWM3dF1hwzCqJ32odbw/3TiFCEeC1B82p1sR -sgoFZ6Vbfy9fMhB5S7BBtbqF09Yq/PMM3drOvWIxMF4aOY55ilrtKVwmnckiB0mE -CPOColUUyiWIwwvp82InYsX5ekfS4x1mX1iz8zQEuTF5QHdKiUfd4A33ZMf0Ve6p -y9SaMmos99uVQMzWlwj7nVACXjb9Ee6MY/ePRl7Z2gBxEYV41SGFRg8LNkQ//fYk -o2vJ4Bp4aOh/O3ZQNv1eqEDmf/Su5lYCzURyQ2srcRRdwpteDPX+NHYn2d07knHN -NQvOJn6EkcsDbgp0vSr6mFDv2GZWkTOAd8jZyrcErrLHAxRNm0Va+CEIKLhswf1G -Y2kFkPL1otI8OSDvdJSjZ2GjRSwXhM2Mf3PzfAkCAwEAAaMjMCEwDgYDVR0PAQH/ -BAQDAgCkMA8GA1UdEwEB/wQFMAMBAf8wCwYJKoZIhvcNAQELA4ICAQDBxOHKnF9z -PZWPNKDRmBPtmnU2IHh6JJ9HzqGALJJbBU0MUSD/aLBBkYeS0YSHgYZ1hXLsfuRU -lm/czV41hU1FTDqS2fFpcAAGH+6/rwyfrz+GYr2K4b/ijCwOMbMrDWO54zqZT3KU -GFBpkrh4fNyKdgUNJsy0Q0it3gOGSUmLvEQUzqxPFVz7h/pF/Cecr0/kpjbpsxna -XQkhtDyKDIQfPCq8Ci1vox5WvBbBkdzDtyCm+KSb6VC3pCX6LV5NkS7YM7mtscTi -QdYfLbKX05kUVG2R9SShJn5BSXzGk9M5FR5koGY0lMHwmJqaOqazXjqa1jR7UNDK -UyExHIXSqJ+nCf4bChEsaC1uwu3Gr7PfP41Zb2U3Raf8UmFnbz6Hx0sS4zBvyJ5w -Ntemve4M1mB7++oLZ4PkuwK82SkQ8YK0z+lGJQRjg/HP3fVETV8TlIPJAvg7bRnH -sMrLb/V+K6iY+08kQ2rpU02itRjKnU/DLoha4KVjafY8eIcIR2lpwrYjx+KYpkcF -AMEC7MnuzhyUfDL++GO6XGwRnx2E54MnKtkrECObMSzwuLysPmjhrEUH6YR7zGib -KmN6vQkA4s5053R+Tu0k1JGaw90SfvcW4bxGcFjU4Kg0KqlY1y8tnt+ZiHmK0naA -KauB3KY1NiL+Ng5DCzNdkwDkWH78ZguI2w== +MIIC+TCCAeGgAwIBAgIQVhmtXJ4fG4BkISUkyZ65ITANBgkqhkiG9w0BAQsFADAm +MREwDwYDVQQKEwhRdWlja1RMUzERMA8GA1UEAxMIUXVpY2tUTFMwHhcNMTgwNTIx +MjI1MjMwWhcNMjgwODI2MjI1MjMwWjAmMREwDwYDVQQKEwhRdWlja1RMUzERMA8G +A1UEAxMIUXVpY2tUTFMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDK +J/SLv0dL7UXaNSEAdTMV8+rOFMcQNov/xLWa1mO+7zNZXHIdM+i1uQTHTdhuta6R +wfqkruPMZ9sqK7G9UIPi11ynkdTiZKRCvCr2VMc/uf5WuIsZE1JXXknSNee1TMmV +Je8TUJsRjEyQDbxn5qUAJLi8yj/O7W8wsnVHdySKMbaLN6v75151TxiIuOoncCHQ +yzz10DzjXfXYajuheu+MLy/rjNGDj0gys4yQZAHlQWY9Lsiiix9rBdXQjVc3q2QT +VM5v3pMjXcPweaIbTWJnbOgmy+267kX6kQpUfZRE55dQt6mPtPQ2idPvqPP3TXwa +AFH39cz/pPifIZApDfZFAgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMB +Af8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQB93GckXcLcfNdg9C0xMkvByPQJ +dcy0GT991eZ/bNC39AXrmCSfn6a1FRlWoiCOSOW1NIZWQQ7jDep/T585vq2jN7KX +hT/z3iIdNWR+Amvo4pyJ93u2D3uG/bmmguAr62jyIgrJudQ3+Mnd+bj/J33XzAgc +d4ZGPvCmKtn8cTKzyS8rjy1oPSUm6pZnfk41MgMWrGuS5HkC3Aa7jo/4RdgGOJpm +nUdz2FGfW/+gwXRy2e94V7ijjz+YwpzL0wHPyXyAm7GwJ7mfvPOZrQOLLw4Z9OaK +R76t4NZBo5TmtvW5zQVsv3sPRnuqcmR0q6WR/fEuMafVtRVOVuDrZlSy0EtA -----END CERTIFICATE----- diff --git a/contrib/docker-integration/nginx/ssl/registry-ca+client-cert.pem b/contrib/docker-integration/nginx/ssl/registry-ca+client-cert.pem index a239939d..db8c2e89 100644 --- a/contrib/docker-integration/nginx/ssl/registry-ca+client-cert.pem +++ b/contrib/docker-integration/nginx/ssl/registry-ca+client-cert.pem @@ -1,29 +1,18 @@ -----BEGIN CERTIFICATE----- -MIIE9TCCAt+gAwIBAgIRAKbgxG1zgQI81ISaHxqLfpcwCwYJKoZIhvcNAQELMCYx -ETAPBgNVBAoTCFF1aWNrVExTMREwDwYDVQQDEwhRdWlja1RMUzAeFw0xNTA1MjYy -MDU0MjJaFw0xODA1MTAyMDU0MjJaMBMxETAPBgNVBAoTCFF1aWNrVExTMIICIjAN -BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAq0Pc8DQ9AyvokFzm9v4a+29TCA3/ -oARHbx59G+GOeGkrwG6ZWSZa/oNEJf3NJcU00V04k+fQuVoYBCgBXec9TEBvXa8M -WpLxp5U9LyYkv0AiSPfT2fJEE8mC+isMl+DbmgBcShwRXpeZQyIbEJhedS8mIjW/ -MgJbdTylEq1UcZSLMuky+RWv10dw02fLuN1302OgfJRZooPug9rPYHHGbTB0o7II 
-hGlhziLVTKV9W1RP8Aop8TamSD85OV6shDaCvmMFr1YNDjcJJ5MGMaSmq0Krq9v4 -nFwmuhOo8gvw/HhzYcxyMHnqMt6EgvbVWwXOoW7xiI3BEDFV33xgTp61bFpcdCai -gwUNzfe4/dHeCk/r3pteWOxH1bvcxUlmUB65wjRAwKuIX8Z0hC4ZlM30o+z11Aru -5QqKMrbSlOcd6yHT6NM1ZRyD+nbFORqB8W51g344eYl0zqQjxTQ0TNjJWDR2RWB/ -Vlp5N+WRjDpsBscR8kt2Q1My17gWzvHfijGETZpbvmo2f+Keqc9fcfzkIe/VZFoO -nhRqhl2PSphcWdimk8Bwf5jC2uDAXWCdvVWvRSP4Xg8zpDwLhlsfLaWVH9n+WG3j -NLQ8EmHWaZlJSeW4BiDYsXmpTAkeLmwoS+pk2WL0TSQ7+S3DyrmTeVANHipNQZeB -twZJXIXR6Jc8hgsCAwEAAaM1MDMwDgYDVR0PAQH/BAQDAgCgMBMGA1UdJQQMMAoG -CCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwCwYJKoZIhvcNAQELA4ICAQCl0cTLbLIn -XFuxreei+y6TlG2Z5XcxJ84mr8VLAaQMlJOLZV0O/suFBu9KqBuvPaHhGRnKE2uw -Vxdj9qaDdvmvuzi4jYyUA/sQuqq1+wHwGTadOi9r0IsL8OxzsG16OlhuXzhoQVdw -C9z1jad4HC7uihQ5yhl2ltAA+h5G0Sr1b9El2mx4p6BV+okmTvrqrmjshQb1GZwx -jG6SJ/uvjGf7rn09ZyYafF9ZDTMNodNXjW8orqGlFdXZLPFJ9agUFfwWfqD2lrtm -Fu+Ei0ZvKOtyzmh06eO2aGAHJCBTfcDM4tBKBKp0MOMoZkcQQDNpSyI12j6s1wtx -/1dC8QDyfFpZFXTbKn3q+6MpR+u5zqVquYjwP5DqGTvX0e1sLSthv7LRiOi0qHv1 -bZ8JoWhRMNumui9mzwar5t20ExcWxGxizZY+t+OIj4kaAeRoKK6r6FrYBnTjM+iR -+xtML5UHPOSmYfNcai0Wn4T7hwpgnCJ+K7qGYjFUCarsINppQEwkxHAvuX+asc38 -nA0wd7ByulkMJph0gP6j6LuJf28JODi6EQ7FcQItMeTuPrc+mpqJ4jP7vTTSJG7Q -wvqXLMgFQFR+2PG0s10hbY/Y/nwZAROfAs7ADED+EcDPTl/+XjVyo/aYIeOb/07W -SpS/cacZYUsSLgB4cWbxElcc/p7CW1PbOA== +MIIC+TCCAeGgAwIBAgIRAMGmTKEnobjz4ymIziTsFuMwDQYJKoZIhvcNAQELBQAw +JjERMA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE4MDUy +MTIyNTIzMVoXDTI4MDgyNjIyNTIzMVowEzERMA8GA1UEChMIUXVpY2tUTFMwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCaFrwVi+BAvng9TebwOLg2Juzg +wnW2Lv2EOqpSYmlZLLub46/W+ktqrcb+nBMBwnbON0woCbMArONuiRk7BATnmLH8 +1e6I9Rax1nCaEpKhhH/b3T9PjwvzrXC+NIqeC46E7AEneAdBa4L/x27F/npLJy7X +PAwcH9ImvACJ9csIObjFnGXNTwtGA2SMIOCiNv3lpyb/Yx20EqBcj+etz8XBjAIS +46z0JDAtYAbJgIs7Ek2XQSrUud18jopzK9mrT9YvA4tDu9Woj70IXdZfOeb0W6Y+ +aBbEoHvqFtyeG7BStNszM7n6CTcJAqpHOMlYQPeRjtMwb2Ffw86NvxkfrjoNAgMB +AAGjNTAzMA4GA1UdDwEB/wQEAwIFoDATBgNVHSUEDDAKBggrBgEFBQcDAjAMBgNV +HRMBAf8EAjAAMA0GCSqGSIb3DQEBCwUAA4IBAQBv1MfAYTymtDeA62N86QFOwASq +ah2BQqfHvUzcM0U/H6YDEYUEKX2RFOtGwOwSBXr6v7JmU4KuE6tA6s+XWjD/lLr7 +CqWvJfZNP6zARL+MqbZjSmyymtuXaXH4eNVgN0aaGifhUSMDsg0qyZwG8isMN4hG +kd0y1nNCn+Q3V7oe3NfjfdjviLU9PNNBQFbKRJJRQ6y267lFoWwlaHwtNyvDupVi +f+JFMiuG3o+upqBF8UFUV8Of4VL6UcJI0OoF4ngTFzn3gRYaYKmkYawUmIr9vvg7 +oQccajcN1iNArnZwXK3lKSERybrUEiUZ4uZ69wVlXzE2TemhW1iYfrTU1cya -----END CERTIFICATE----- diff --git a/contrib/docker-integration/nginx/ssl/registry-ca+client-key.pem b/contrib/docker-integration/nginx/ssl/registry-ca+client-key.pem index acfc9a48..12be8f5d 100644 --- a/contrib/docker-integration/nginx/ssl/registry-ca+client-key.pem +++ b/contrib/docker-integration/nginx/ssl/registry-ca+client-key.pem @@ -1,51 +1,27 @@ -----BEGIN RSA PRIVATE KEY----- -MIIJKQIBAAKCAgEAq0Pc8DQ9AyvokFzm9v4a+29TCA3/oARHbx59G+GOeGkrwG6Z -WSZa/oNEJf3NJcU00V04k+fQuVoYBCgBXec9TEBvXa8MWpLxp5U9LyYkv0AiSPfT -2fJEE8mC+isMl+DbmgBcShwRXpeZQyIbEJhedS8mIjW/MgJbdTylEq1UcZSLMuky -+RWv10dw02fLuN1302OgfJRZooPug9rPYHHGbTB0o7IIhGlhziLVTKV9W1RP8Aop -8TamSD85OV6shDaCvmMFr1YNDjcJJ5MGMaSmq0Krq9v4nFwmuhOo8gvw/HhzYcxy -MHnqMt6EgvbVWwXOoW7xiI3BEDFV33xgTp61bFpcdCaigwUNzfe4/dHeCk/r3pte -WOxH1bvcxUlmUB65wjRAwKuIX8Z0hC4ZlM30o+z11Aru5QqKMrbSlOcd6yHT6NM1 -ZRyD+nbFORqB8W51g344eYl0zqQjxTQ0TNjJWDR2RWB/Vlp5N+WRjDpsBscR8kt2 -Q1My17gWzvHfijGETZpbvmo2f+Keqc9fcfzkIe/VZFoOnhRqhl2PSphcWdimk8Bw -f5jC2uDAXWCdvVWvRSP4Xg8zpDwLhlsfLaWVH9n+WG3jNLQ8EmHWaZlJSeW4BiDY -sXmpTAkeLmwoS+pk2WL0TSQ7+S3DyrmTeVANHipNQZeBtwZJXIXR6Jc8hgsCAwEA -AQKCAgBJcL1iR5ROMtr0ZNIp4gciALfjQVV3gb48GR/e/9b/LWI0j3i0sOzeLN3h 
-SLda1fjzOn1Td1ma0dZwmdMUOF+hvhPDYZfzkwWLLkThXgLt/At3rMYstGWa8pN2 -wVUSH7sri7IHmYedP3baQdrHP/9pUsGQc+m8ASTE3i+PFcKbPe5+818HTtRrhVgN -X3oNmPKUNCmSom7ZcKer5P1+Ruum0NuDgomCdkoZgfhjeKeLrVjl/wXDSQL/AhWA -02c4/sML7xx19nl8uf7z+Gj0ir1pvRouhRJTwnRc4KdWu+Yn7WLU8j2ZKf5St/as -zjnpYVEdCp0KSHccgXtobUZDEG2NCHmM6gR2j3qgoUAYjHyqPYlph2r5C47q+p4c -dDWkpwZwGiuYq9qpZj24X6BfppxExcX6AwOgFLZLp80IynwrMVxFsDd2J+KpKRQ1 -+ZtYPcULwInF9MNi/dv84pxGOmmOaIUyjN8Sw4eqANU4T5uvTjUj7Ou6KYyfmxgG -y++vjpRN7tN1t1Hwde8SVWobvmhU+5SJVHV8INoJD7uciaevPo9pt833SQTtDXeY -PVBhOKO7thAxdUiqlU/1nGTXnf1VO6wAjaVYoTnP4tJ97WuTptwd2F5znVWHFGVh -lzJAzmFOuyCnRnInsf4n5EmWJnT7XF2CofQqAJ8NIddrU8GnQQKCAQEAyqWAiPMK -I/dMzlS7oJGlhbKZ5R4buc+EoZqtW7/8/S+0L6IaQvpEUilD+aDQyaxXjoKiQQL+ -0UeeSmF/zU5BsOTpB8AuJUfYoUe0N+x7hO5eIcoCB/QWYX+iC3tCN4j1Iwt6VliV -PBYEiLUYPngSIHob/nK8UtgxrWQ3Fik9XJtWhePHrvMvDBalgCKdnyhuucGxKUjc -TtPcyMFdi0z4Kt/FAm+5u/v4ZkO909Ish0FrAqQ9t5ETfvTTTYKBmzny6/LSPTK9 -0XIsHltuC1xG4vGQsES/Ph++Yj3Vn011FqvFZeBUHbfcQuB4h5wcb+90d4GU1kux -eabsHPIZKrlN4QKCAQEA2Fs8NAN5K9i7qbxZCJPi6DJV6XMznk6JVGb+qkkChCyq -IOXb95+c9CIpe6w2d3res3zvML3zbdz2Lyp9G0ve6tSlOaSnHeyIxZ5SRB+yQrcF -GXtsx370bOGjCi1/NH85kwKlMuROFJKleJQv8rKpIEo5aPSPV9Cc/VsUqBpvR+O0 -U1HMv57P4yJA/ddw6imHJBl3jTmWBpK4B+LBsCbdypxdVoO8t32Lb2BqDTaPJfYU -RJUpjn/efLLoP6CWxYtqpUlY5tc7NJGAokl8Fo1mPn02klydvs09uiXE80Li2Hoc -/meMH07Lbt2VTw6iGNRX6VpIHEUZGZeS6rbAvO4ZawKCAQEAjOtGVPXdyWEB0kHu -MBzYY/7tMf0b/rymWNL9Vt5NiauQu8cYSBdNR21WzdLdHkFwqbOCLX9twA7zrnna -q+SNnfuxaShlbptls9HvKyySQMCaSRj3DJzaq3ZcM2vFgmUFQxeKPV1geeY9xOta -LqbExDzmFq2m9F1PPmqAPDL1bt6+7mCVzb1irB9be52WysUNKrPdBP6b5V1DHYAK -EwK1WOs/TxBusqDn/gWBjjmLqYr+ZVndaTfDvPd3sWDdzBoiKZ40QUZ15Z5lu76M -6e2DhfHCUjGcZBEjDaI+WYc9s0REAzJajEf9Lax3ZKZUyCpWbXx5CgSdKCHB8+cP -RTyTQQKCAQEAsxx8r5a8hocLfQ43Kvm7HH0nUHeVoRXlbOFDLNf6ZE/RnCCOxOX3 -esiZTRAZmzo2CaOBJPnr/+SwTgW/woxCBGh8TEc6LnS2GdviwRD4c3CuoRTjzhgU -49q8Ld3SdDRrBoBnIMWOuktY/4S2WRZ9GwU3l+L2lD1Y6gmwBSa1P2+Lxnpupagk -9CVUZpEnokM05LbMmTa2M8Tc43Je5KSYcnaWctvmrIUbnN3VjhC/2y5oQwq1d4n2 -N4eo65vXlbzAUgtxtNEz62YVdsSdHNJ8dXkVZ3+S+/VPh75i2PxjbdFSFW7Futlx -YtvAEs3LdgC8squSDQ1LJTutXfBjiUUX9wKCAQBiCMre86tLyJu6Qb6X1cRAwO7m -4kyGzIUtijXko6mWxb4X/usVvzhSaNVYbHbMZXjX+J5vhBOul+RmQ3EY6nw0H2z8 -9D4z/rnQVqeb0uvIeUhBPni+s4fS4bA92M6Ie5bhiOSF2JjjJr38BFnTZARE7C+7 -ZII7z2c0eQz/wAAt9fWWroAB2mIm6wxq0LNij2NoE0iq6k2xJE1/k8qhXpsN0zAv -bjG72Q7WryBeK/eIDK9e5wGlfLVDOx2Evlcaj70oJxuoRh57e8fCYy8huJQT+Wlx -Qw4zhxiyzAMq8SEqFsm8dVO4Bu2FwzmmehA80ieSb+si7JZU92xGDT394Im2 +MIIEogIBAAKCAQEAmha8FYvgQL54PU3m8Di4Nibs4MJ1ti79hDqqUmJpWSy7m+Ov +1vpLaq3G/pwTAcJ2zjdMKAmzAKzjbokZOwQE55ix/NXuiPUWsdZwmhKSoYR/290/ +T48L861wvjSKnguOhOwBJ3gHQWuC/8duxf56Sycu1zwMHB/SJrwAifXLCDm4xZxl +zU8LRgNkjCDgojb95acm/2MdtBKgXI/nrc/FwYwCEuOs9CQwLWAGyYCLOxJNl0Eq +1LndfI6KcyvZq0/WLwOLQ7vVqI+9CF3WXznm9FumPmgWxKB76hbcnhuwUrTbMzO5 ++gk3CQKqRzjJWED3kY7TMG9hX8POjb8ZH646DQIDAQABAoIBAE2SfnOWbHoLqXqr +WkS7OTnB1OS94Qarl2NXKWG6O3DyTSyIroBal1cITzLkncj3/lmIiyVo5J3Fa+W8 +zV/hgRqay5gOlzyJrjgvTZazHPCFRN0KABJsYEb3nNeUmehAxynxqg8VpQlxN4zO ++NxiZWyqODGRAEO0XVa0tMy/Wcw0guD18+U9GYiYQi3d7NEHTt5d8CX9VKY/bHKR ++ecC/lr7URnA/8FM60mKI6MAiHPxyUjJ7/6dq1goG8dDHcAtOEEIawECQtRfQ+Dn +RL55nDPRYNviXRgr8u61TFm8zgkTUQy2MLRkHAyP0IBLUiMpqDdmXB4LNMQQSrsY +0FyinIECgYEAy3eT5ZUb/ijGsWUT/DizUoetFfg8X4LV+HRLXdlxfcOYB3Elbeks +JPC+Tdm33nB0lqo3hLVNPB9yqJiPOOaWQPpWBImOeitpmDRAagjwUewJwLY9RmKT +RD0+YyCC0SwvSDFDsWF+ncW/8XpobvetCSC6mmjX6Wr070yHkhDeeC0CgYEAwd9v +P+TjgWVyL5YRiOJ+wjR7ZKpHCiSSxSTjIhq40hs5LtHddSk9e/+AU0otcMExzCqN +E4f/e05a6TD5CFAgmUMK7nb49ept3ENVoD+M13K3tTaTyeZghwYNNK56osDtdCgc 
+c68jQAy81gU7iRt30xbLVV6HgGdrSrWN8D8DFWECgYABkV1RYpHBppzJVycNRX6U +PzllNvF4JvDxJixCf99xAaXVQNjx/N77NeOxg+D31NQBKTSeUCtVMETY6bwIyzYT +MBqjlE/FvznkE1r/tivr5a65jm3wcegCmZo2d1SqufVvT/nejwrDunddK/1MBZqO +vHLTp8UqJknW4jcVOA4OzQKBgG7BdozJ9i62BcWptdq9iizoTpXzsSHaQv7dU+Tn +3y4o30IgIqQMK1PrYyQx/EOuGwTISlAeIZYP7V/K2nolTHpCEryouxHCG4D59rDV +nWB36PtdcpClS//XNTQjeWwBS6ZQQ/DS3RB6NmcOFjT9vDabjw32MvLoIiNMFQpq +9RgBAoGARQnQ94oH98m/iheJpzaM9NhQhAoXSi4w19FySCtnyZTYTd0A7hjRzsSl +DeoAkIGDHyy33RPK/kPtA6dxM/DQ00IkkwH4soaDDbnCmagdw4NnY8eA1Y/KSbd+ +XNNm+sDafoVyCojtsTA7bripKB8q5vPYo3qRLfQ7dwMeRPYblPI= -----END RSA PRIVATE KEY----- diff --git a/contrib/docker-integration/nginx/ssl/registry-ca+localhost-cert.pem b/contrib/docker-integration/nginx/ssl/registry-ca+localhost-cert.pem index 3ca47f45..a76a4df8 100644 --- a/contrib/docker-integration/nginx/ssl/registry-ca+localhost-cert.pem +++ b/contrib/docker-integration/nginx/ssl/registry-ca+localhost-cert.pem @@ -1,29 +1,19 @@ -----BEGIN CERTIFICATE----- -MIIFCTCCAvOgAwIBAgIQdcXDOHrLsd2ENSfj5h8ZmjALBgkqhkiG9w0BAQswJjER -MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDUyNjIw -NTQwM1oXDTE4MDUxMDIwNTQwM1owJzERMA8GA1UEChMIUXVpY2tUTFMxEjAQBgNV -BAMTCWxvY2FsaG9zdDCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK2K -saEVcHq0eldu5kABbWtZsf9keK7lz8beVIowzOqp5IHpGlggtH7xDVeigA/sLdds -WTgKEOq3zsJzdgfEti5TNAjjmPqjMKkolqv3LXDJG0dZ2GZ8W/eBB6X1wB0LKr3i -ye3/5jb/wCZYVGGMQXj0VQxY8Qq+OHEp0effeheJqA0OYOj+RaZwi20OR/KmJRgY -wXU33bZyapuyT4krhFlFbtzXeKsKQPrT2ePWxPAceqUGUTIqyJySYIw6vb72YxjX -FNRw6Jg7B7RqVJaVCfBrVxtAv+rCLOhUOVYmWhgWEIODPXiqOGwB0VUApAVAYqfi -TYnJIZ7QYLlQx5VPNlzZuSJTUzKmHQLtLcTqdO5HmLxfxc0WuS/ftK916wy/jpSc -m2DiHjIy6aAEaHKGQrNgT+no68kp30xkYAVsIs0BFpl6Q2iNr5e0uKta82A0xU1Q -we7swSHOHCevuDZfFA/CqnBptOjvNUuVytcroCeCrV/ftp75w/Fd9zOcb6LGLxM2 -2UzhkSXl3II250xj74Q3q8T9TDxCLty7oiawhaYKI+8SDYc510EQ7MH46WMO+3Uq -JkpmmELd9POgnnZ1JrCFmf0flUKTi2CqU3wrBPpPMwFBxoFipp5iL87npACHc3DY -6uaoF4Pf9Et1Fd7HRon8RMsKkrSF92NFiBx5UvhZAgMBAAGjNjA0MA4GA1UdDwEB -/wQEAwIAoDAMBgNVHRMBAf8EAjAAMBQGA1UdEQQNMAuCCWxvY2FsaG9zdDALBgkq -hkiG9w0BAQsDggIBAC0F4ci1nqZ9KUhEEAmWmy8g89DovNNIGSC51r2WJ/COmYUX -X70TONscsBL/kx5MK4xoAmb+EN6Yy8i+z9NkNJd0B+2MjXPMFBpgGb0UiPv2wEmZ -5PAKyjwTxNIm6L/nFhkmVqfsQHfjHukXES4C0ff6fj6fuDpBfl5nTlVmc9LpP+hT -5RAwW10qumucGxAWGNBWW+K66cf8O7n/0nQykxJxYjBx16ZB80H2uvqFDKDVFqze -co5M4euXQq9KiXPRlcC9rab2a7FGLHd0TyPkq6TvfsqpxcryyKS4rIAz3sQh/tl/ -/qm1tBcZW2bce3UlF2Wb2dW9HqvIu1O84f6ptLqwgKcIdTbwgQZ0kbFoWE2kWJSV -w+eAFb7tz1LDTpF3NRlz+1K27pBQWRQgcqoIRoQXpC0LfQY9Mp70QIfUQdUh6tnO -8hmq5y623tfxiDwCxb/EOpwCmwK1Cp9cloZTDefVE1r6NkEJWeeHG79VljUGF1KT -NKzXWrrsFtge/hU9Pj+frcZO9qExxPCcsrdZcoK7Ll8s+pjulRvbnCnJkNpeOI3P -iz6+sdGmzKSKg2daRM67Zmy5tmlBEX/eV7kFqt+b3HsdUiLo3Ng2lyPLNNDfwUtB -EukgYGjVJoyqLjLXgsCxLJlk7X/ogVwf8SlAnQ7p6KuxGWm02vlUpEmJp+Hq +MIIDDTCCAfWgAwIBAgIQfzdVwVz4igfdJPd6SW/ENTANBgkqhkiG9w0BAQsFADAm +MREwDwYDVQQKEwhRdWlja1RMUzERMA8GA1UEAxMIUXVpY2tUTFMwHhcNMTgwNTIx +MjI1MjMwWhcNMjgwODI2MjI1MjMwWjAnMREwDwYDVQQKEwhRdWlja1RMUzESMBAG +A1UEAxMJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA +v+H3BTOGLRYjyPx+JQQcP5r8HHBmjknflE6VcrbRD5VGx8192hwsjAdlL0kz1CEq +FW2KQidJieDi8iIh9BWB8lsTQ51xZGnry6CbVXxTbv1Ss8ci9r8Cm3GPjWy5gqTi +DTUUQez8xq29gUod4ZvRoJ8jl/eI7gF7MBFakv7tZQ40SHcogjQoG7nKMXG1VOhX +D4kM120E+hW9x0U3j0SaCIYl6bG2RHIvUMlrVnj4es6JBVzqItkhAwugE6ytneOh +VxWQ/7e8qKW2+lVsPnH/zjNES0j/9XYgVCjwkgirxjs2eZRIS5Mg14DdYqfQ9MRQ +EoyQxl3xcDxjqPocMgGYHwIDAQABozYwNDAOBgNVHQ8BAf8EBAMCBaAwDAYDVR0T +AQH/BAIwADAUBgNVHREEDTALgglsb2NhbGhvc3QwDQYJKoZIhvcNAQELBQADggEB 
+ACU0E2BAdqjVvO06ZyHplxxQ4TQxK9voBCTheC2G7oFaM4VLFf48GgoMkvbsMGyd +1JqIACCDuSJ5UVjmWm6VIDZrnRsf/BbQCTZXKQd4ONLL5DU/OPjAFKGeCpAK51yj +OMHdw3cQmMCEpMH9HHJ+iB3XWLcDKPAxTkcsBytC9VLUgU7Q4+3eYIT/j/ug+y4G +W4A0cmdDDuozwBAPXj7ZLKdVlkUFka8WjQAJesHTIifS1bfahGiSNVJbYjXbGoML +d0IeGMd1lXlc2M+ygqZsSM2ErzndNdvDs7S6u/FIICm7uW6P2naPeMtedb2orO6Q +5O3gRtj/UQjegI0XV4YO2TQ= -----END CERTIFICATE----- diff --git a/contrib/docker-integration/nginx/ssl/registry-ca+localhost-key.pem b/contrib/docker-integration/nginx/ssl/registry-ca+localhost-key.pem index baccc9eb..8d58ca8e 100644 --- a/contrib/docker-integration/nginx/ssl/registry-ca+localhost-key.pem +++ b/contrib/docker-integration/nginx/ssl/registry-ca+localhost-key.pem @@ -1,51 +1,27 @@ -----BEGIN RSA PRIVATE KEY----- -MIIJJwIBAAKCAgEArYqxoRVwerR6V27mQAFta1mx/2R4ruXPxt5UijDM6qnkgeka -WCC0fvENV6KAD+wt12xZOAoQ6rfOwnN2B8S2LlM0COOY+qMwqSiWq/ctcMkbR1nY -Znxb94EHpfXAHQsqveLJ7f/mNv/AJlhUYYxBePRVDFjxCr44cSnR5996F4moDQ5g -6P5FpnCLbQ5H8qYlGBjBdTfdtnJqm7JPiSuEWUVu3Nd4qwpA+tPZ49bE8Bx6pQZR -MirInJJgjDq9vvZjGNcU1HDomDsHtGpUlpUJ8GtXG0C/6sIs6FQ5ViZaGBYQg4M9 -eKo4bAHRVQCkBUBip+JNickhntBguVDHlU82XNm5IlNTMqYdAu0txOp07keYvF/F -zRa5L9+0r3XrDL+OlJybYOIeMjLpoARocoZCs2BP6ejrySnfTGRgBWwizQEWmXpD -aI2vl7S4q1rzYDTFTVDB7uzBIc4cJ6+4Nl8UD8KqcGm06O81S5XK1yugJ4KtX9+2 -nvnD8V33M5xvosYvEzbZTOGRJeXcgjbnTGPvhDerxP1MPEIu3LuiJrCFpgoj7xIN -hznXQRDswfjpYw77dSomSmaYQt3086CednUmsIWZ/R+VQpOLYKpTfCsE+k8zAUHG -gWKmnmIvzuekAIdzcNjq5qgXg9/0S3UV3sdGifxEywqStIX3Y0WIHHlS+FkCAwEA -AQKCAgAtZw3V8P/+el1PpqoCsNzpqwvQn36bc3CKvPwtM1tJQa2Q92V3DQdr9rDg -7pjGkankpGorKScH4ZLseLy2h5aKRCZm9PS/DhbbCs1wrDhtO5AxeKYPGhYNiOpx -VvwuHQ/Pohfmdn7KgNrKrW1WIBW5CWN+2X4mq2Gk6aYLHgKZSeB3mf1st6mNRACW -RZg5OZKW3VMv0a/l3cVaeqooXwQ/PtUkXhMp3ILnnKly3Gulzi2gIyj3EQ5vODSe -O3gND/UZOJwwgGG6Aief4fnDc7an+c1OSgBr8OVC21Ys3dfQWWV0os9gVFhymX8k -2AgRf6jP93sFw2NSY34KvcGZpKG59oMDxWF1vPo8sOt17Ey0+qp3eUtB3FfE7Wtf -BaLaD/x4U91izIqOEMzQ6QiZAyvmUoBkUSo125CYuIkt8C8Q1lA1KjihETWF37QR -mr8LUk0A0x3SErtm4wVfeDEqVSfI9gKpk6i6rlUzuCjv58Rc0yyqoghXwBWM4CKj -5ZHYpBKAxj4bM6IrKnodAOcsyVk2c2zVTaMxPhoUj0fF7IE5Hy6YAQ/yBheZEM1v -fhsdBFyS6OqSCnN6UinhH268QPam82lfKTFjW5lOgsSDQZ9rhiWoyamhonJTq65I -nb08f4mzT6OGMwV13zq8dXio6WnUIQAhXdEYWrMBmxp5b6CxAQKCAQEA4kmwV3Nb -n3ZIzVAp2l+yGZwdg4YWzN2kcfdNkL8I+Pn8pWrOwv/uGQYmM0786ys9kB5lu4FR -TMcoEo3AaK/z8N49ro2Kl6HcTmxZgTMr+cl6iwetzqYdkRK7klxyCv5uVloDQDtc -AulDH6RkW9BfRERpi6XtlgiFdJj5jMvXMpwGHX69JVsXb83ZSQESjI2JfO9Y8+4M -a7hNKWW/W0ZBrGCcQQPbgpysfJ+PFKUF/yF1h8SSCdetW2Kv2ix16wL5uHKINYmZ -Y/Om+/AFnUOQlANycgThtgBI5mvg9Khq6W2i/RNcIL7bvwAzq1p+o6cGnImXo4bY -hC4fs2/aeX17UQKCAQEAxFQHSLBYDLal5CQYbHbNZ2sLjwRUraEd/+BA8XoERVVQ -JPihgEvTPEaHnWrFTw0qaGKgMZ5SZCZSWUIfXjYvQIUcEMhNUOHweXhJJhifO5sd -sTuvU7bWg76F69bRKfp8KM266m7qMYv+tNlQ6Kbz/1ImsW00xb86vCK2hPfhldtN -d/iBb4HVDu1uoATHUNuqsSGj/UvttKudQdg7MapzM4N+D4m6rPZUjQmtoMWOXt7R -LYrqEOHWfkxXKlVHw1cL9uzUpArvnR0VcYvGfXiYJFbXWsEB07VxIoLMPEtPbpH9 -YLY37KugrthEVnsbySmZIWCRDEqQuuAaa5o8S1naiQKCAQAiU/dybMebe0A0FVMk -E5xbEjnP+AmBbqZBu7iCmthrnNDc70UKg/TEyxAEfJkVu+uM72+TcFy6/wNvPR3R -Q9AH3E8TKdm6gw1+wCUb2n1zWUND0Bhn3v9hQKw/2dJbJJnsc59GoTqmHmjWZgPr -gcLSAmbYjoVqW0STmZlR6KJuxQiQdOeQwS7fASVTU9xSgi43S7/80UIFHWJnQ04y -NIhF9CoAGuuz9ryb80CraxVrzNGdlQ5qe9OKp3/x4wjIbB0iBA3xwTwJ066jTZgs -cVF/gr5b2a28BHMKsZbgxqPhYYZ2SfeR6CJB6W/tML9BaFcybBUa85vpAW5BtFg6 -UfThAoIBAAp1/71byBVFVimF0tdUrTUpewAv1uM5hoOvy0YSnk+jcBXIObLAV40K -pQc6PTEtHmlZd/es2+8CK7kd0NYQRQxHC2vJgHUi1NFkG2GwRivC5B4hdAId5+g1 -KqWaWKLH+f2imKcNKeVh9Dxmp+z9mFquYelqTDmNKvADWX5URuzZNpOB5kOuw098 -TzyvhH9GdR3jEP3aIdxSmJp9jwnibyj7hKgHSq8UoQSy01GRtThQ3wxyLm6f2fH4 
-11wmFyDNbpHFpL7o5kOU3SOjsvvUhSbKiccIKbTCIjkYhxFfYegeV0Xj767opjMq -ytlgzeY2FTa2EoR5JKUQc9fv6+6H5yECggEAVVfnywPm8QXn+ByFDdUndZg3uEje -DGyvt1M3mIz5geyRZO8ECzgsZVzKgZC8jDB4lPKz3AGgNlUl/vyGHk6CtW6V6ePA -EXcmOkkMKJQMdopY2/cE6YlSpBGMCcnfothgL0HXxYoop4xVjb74k7tFcNrIDoRx -zp9dSalgxx9aMeaURRbMWf8AhWLZUAjJ/359M1SmcNW619SL3p8Q95Nptvdiltww -lWOCkBdgkjW0mel+Mi2+gY8UPmgNBMPrJ1z9b7b7529YCv5Oci8ABn/N202nhjCp -LupADooNknOMLDyqwRorEv4g6wRjuPIYTIhI9fO5ranu089x+mmGU2tCBw== +MIIEowIBAAKCAQEAv+H3BTOGLRYjyPx+JQQcP5r8HHBmjknflE6VcrbRD5VGx819 +2hwsjAdlL0kz1CEqFW2KQidJieDi8iIh9BWB8lsTQ51xZGnry6CbVXxTbv1Ss8ci +9r8Cm3GPjWy5gqTiDTUUQez8xq29gUod4ZvRoJ8jl/eI7gF7MBFakv7tZQ40SHco +gjQoG7nKMXG1VOhXD4kM120E+hW9x0U3j0SaCIYl6bG2RHIvUMlrVnj4es6JBVzq +ItkhAwugE6ytneOhVxWQ/7e8qKW2+lVsPnH/zjNES0j/9XYgVCjwkgirxjs2eZRI +S5Mg14DdYqfQ9MRQEoyQxl3xcDxjqPocMgGYHwIDAQABAoIBABbp0ueqGXG03R0Z +Ga8t6Hmn9kcnHPgM1kgNgkcqkZh8yPD/FvI+vwsRrwGQikHgm/fnFsWDj4KJelBT +xx4wm03nlktSt8G37FJqoWH58LSmR4P0WbaBZLxPOUc4Hob9TYkqN3sP47eN871G +rn7MbqHxnvx8sLtLLfy1dc1r58lTTZB7YL1OPV7B/VYhYFDtpkUBvadV+WJ7SJ5G +UHrBsshOUJbUI4ahmc8izi40yDw+A0LRhtj3i7aFr2Og+vCq9M8NXDjhdOu9VBkI +fvniC6worJk/GnQDJ/KT5Uqfejdd3Pq7eHp11riqwua8+/wi726zRz9perFh/3gJ +pYjaY+ECgYEA+ssW+vJRZNHEzdf8zzIJxHqq9tOjbQK9yyIPQP5O4q9zKvDJIpnX +T31aZTLGy0op+XA9GJ7X0/d1tqo3G2wNBsFYWPn3gmVVth/7iHxRznorNfmsuea7 +1gFm19StL2+q8PaZ4fx9vUcWwDHlALYTYlTaazms6z9FWD/KbB8kiWkCgYEAw93H +Pp12ND3f6p2rYbXPfHJ0aAUbrQR4wRG3ipVWXGjvn2h/CbrLAt5W1wB3iwnWwatX +opdbfzjxgb0wRQHSPNVj3/SOHr8E5zH/mw+eV7mOea4xlCLTSIAJNzW1320hwsbw +FrEC5qe41PrbMUu+4LvXPkHCKVxRXaV4QX4YHEcCgYEAurjegTRM+X1cw81dwn4E +265g/6iO8qip2kWficpNvWTXoE7p0cMslVhFJzdo3w52teqk8mHBW2XQ1JFiuh32 +jOMC/iwN5Z3A9PpW8kVtOwemiGc9/KMXkrw0b9k+oCTJ5uITrDeq/nOhMrNzRtZJ +FFsMy+yDHBtda9kCwwFk2JECgYBQUpbu+qwK6IT3NgmeXGzmYBmUvuOGpJrQsm9O +iceMxgvel3/hgZTXbE64hRyBDFvhuF6L8v42widoSSmOYxzQjcITibruqO9d0Ic+ +E72fxBzFkcYLNezngnpFBeW75ok900+KPrUt2gJWdTmGkcWJa/7tLRJu28kSWlVi +pk9E6QKBgDH2Uh61ToeNq8Gbnue3pnhUddHELRFQfwHHaa4tFrXBHuPLKqkVefKT +A58awVoPpKTECROeyqe2DJXg9EdSVzKyhg217N/07NRaunfCJ9/TSpFy+5Xls7Rl +U7zK25S1/13KZ6rGVHpmP6Q82VSnsHkPtUfDo3A29llqIQ8je43Y -----END RSA PRIVATE KEY----- diff --git a/contrib/docker-integration/nginx/ssl/registry-ca+localregistry-cert.pem b/contrib/docker-integration/nginx/ssl/registry-ca+localregistry-cert.pem index ee72c0c6..633a126d 100644 --- a/contrib/docker-integration/nginx/ssl/registry-ca+localregistry-cert.pem +++ b/contrib/docker-integration/nginx/ssl/registry-ca+localregistry-cert.pem @@ -1,30 +1,19 @@ -----BEGIN CERTIFICATE----- -MIIFETCCAvugAwIBAgIQJ+iLgsp9gA0DmROqW+tHFzALBgkqhkiG9w0BAQswJjER -MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDUyNjIw -NTQxNloXDTE4MDUxMDIwNTQxNlowKzERMA8GA1UEChMIUXVpY2tUTFMxFjAUBgNV -BAMTDWxvY2FscmVnaXN0cnkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC -AQDHR/A6uiQ9X/Xh5ivmdjRr5XVr1D7+fU9Qu6ohArqtBuJsLr6t2RBTS9w6PIAf -xjQSMSFlrm/CY+hbfBMSgm9NeH23o3kYCgoEPhP/634A45W5xwUFno388U8/NHK7 -qwzSP1ezKXfXNvzuo1mZhT08aVdGMOrZUcZZZl8R3RPcIRw9XDSfXKVkMluH6egk -8iLdOxdIdRS58DeSI09FskWe3cIZ5kJmMqnKoIbYSJCVVeYPO0RFlIBi+zpdVyI/ -r9LG0r0plRdz/HJevbOitU2y93S1s9NWMNEkOFU1PFJmsF3ZzNqJFCySj00y/Hcs -jPULYwIxYdqcv16cTNmd3P6FegvuzLJLjNuGaLJGc1antv+p62P7ZdE3DyprFuxs -MJgDL9+NjDaIzoamFf0Uv7K3F7hxrrAHfvm1CMUOyQLg9J6Wl4mLsOy2ZhCbdNFs -T6dobAUGvz4Muj9V8V5pR+nFehjmsPENSsTcs5j0e8zTWtvMFISdS+NZAkpiz0s4 -PV8DLgk5Rp1ZG2V5OnRPLMOTgK0nngc5GVaxf7OYCrFHbBJ8tL93MXNQptNFeBpV -FhjUGqVFcz+6nbFX2NsFLZnghQRs9lej4TTG33NSAYusKqhVwpYFf8CsXCcvYuU6 -RlkCYjr3PB+nX1UDa0eUGm0zOabf9O3D1VzHQBpDuzSHQwIDAQABozowODAOBgNV 
-HQ8BAf8EBAMCAKAwDAYDVR0TAQH/BAIwADAYBgNVHREEETAPgg1sb2NhbHJlZ2lz -dHJ5MAsGCSqGSIb3DQEBCwOCAgEAaPfAs6saij4FZIPbzAb5M6ZVvfXBg+AfH52t -p3tFsnWUJCiOh9ywsc2NcmJdleKDc4/spElFMUarHqcE1ua6EH15O5GEnHWKj8EY -PVQFrPvf30UkRGNPl8eC7afZtCNk9MLllIATAzBr5Z1i+psV7MmgBKpbZ4B0TnhR -GXNT60QaCJ9RfUuc2z7RHJNo9XTn3Q44X7TFj+P3jHOWzTf8y6Mz6saTy2bugIUy -AfRgRgq/bB8hRjrazg55FIlrMv7dr3J0cIuqmaHfsw7Q2ECMCXW8oQXMBzfuIT0n -sG4u0oVxdNx4OdHsAubGjjwNDhxJvN5j8+YFqZMu03i8LbyamTwsrZg2C3QrRUq8 -SujQEEB+AmO0lpuJ24FsOOYVSYCpLy2ugrKOr2NUqbiBKZs8uBh6RGACfunMZlEw -4BntohiO7oZ5gjvhGZNUEqzMChw7knvVjZ+DkhFk9yE4qIL7VsJSUNI2ZJym/Xeq -jr/oT8CpP8/mFZspa6DFciPfhGLQqKcaZZohL7461pOYWY5C2vsJNR2ucBZzTFvD -BiN/rMnIGFrxUscCCje6RLmrsZ3Lb7bfhB3W6kwzLRfr/XEygAzx6S2mlOM34kqF -HFpKrg9TtLIpYLAKAIfuNbrLaNP1UKh7iLarhDz/qDcvRka/qJTzLD3eLeGXefAP -KjJ1S7s= +MIIDFTCCAf2gAwIBAgIQM3khHYh+82EC0qR1Pelk2DANBgkqhkiG9w0BAQsFADAm +MREwDwYDVQQKEwhRdWlja1RMUzERMA8GA1UEAxMIUXVpY2tUTFMwHhcNMTgwNTIx +MjI1MjMxWhcNMjgwODI2MjI1MjMxWjArMREwDwYDVQQKEwhRdWlja1RMUzEWMBQG +A1UEAxMNbG9jYWxyZWdpc3RyeTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAKA8e9cUSyasRtEHw3yGW5lFCnnZIN+SSvykAOynt9LLKzU5G5ge3ekBtzsl +HE1ndeYjy/dK7XkECQBQ0csF+KSacU5QiZek8g6btH94HDwltCq1I8f1E8LQFP6k +483MKZUDeNNnHzbuK9xsMjYOCrJWGysLHnKjzK/+yfVPwTm9tmUVRqd4xjw1oYY6 +C7iCffIWn7+dQKDjHrn+KyheIy244v5y63AaxgPfjHrtvJtz1vPqxi+FyzDM7RfZ +GIjklC6KaKHmxvUsB0hO4WNb9kt8FBvnxOxuDKf+rUYKTg6JK72O3TaUauiEvE2X +SKT0vYpLoep5hc9ns/yh3BuuznECAwEAAaM6MDgwDgYDVR0PAQH/BAQDAgWgMAwG +A1UdEwEB/wQCMAAwGAYDVR0RBBEwD4INbG9jYWxyZWdpc3RyeTANBgkqhkiG9w0B +AQsFAAOCAQEAMt/lnR3Wy99X/knvjtg7wsPz5T9sZ5hGy/9sIm8sFdsqt5NZi9IY +vS+eyij1yHvOU+pqOxsYQ2NG26CS0CKM3JWLJTo/w8GyiSwxL8a1/UxHmTxDnSMH +cYZRsuPtdkTiAuZhoT5I1ZTsOa7MQF25HiFBL6Ei88FFhcQQjJ7+xYDNhSoddMtz +U8mUY6NOENmvE86QMjWjaj1PXPLO8PxPIqw482Ln/95pHzuaxAYMvxhs2aQlBS1/ +9+vi6VOkbQna9+crmzniXjZDx5QdvMN2QwzFL4hCgqbebVg0zwjhByOwQIjtNEXE +gqxjLkTNOdSva6Fkk/z8BD2XSZ4L+nM3Mw== -----END CERTIFICATE----- diff --git a/contrib/docker-integration/nginx/ssl/registry-ca+localregistry-key.pem b/contrib/docker-integration/nginx/ssl/registry-ca+localregistry-key.pem index d20bda0c..c7af00b5 100644 --- a/contrib/docker-integration/nginx/ssl/registry-ca+localregistry-key.pem +++ b/contrib/docker-integration/nginx/ssl/registry-ca+localregistry-key.pem @@ -1,51 +1,27 @@ -----BEGIN RSA PRIVATE KEY----- -MIIJKAIBAAKCAgEAx0fwOrokPV/14eYr5nY0a+V1a9Q+/n1PULuqIQK6rQbibC6+ -rdkQU0vcOjyAH8Y0EjEhZa5vwmPoW3wTEoJvTXh9t6N5GAoKBD4T/+t+AOOVuccF -BZ6N/PFPPzRyu6sM0j9Xsyl31zb87qNZmYU9PGlXRjDq2VHGWWZfEd0T3CEcPVw0 -n1ylZDJbh+noJPIi3TsXSHUUufA3kiNPRbJFnt3CGeZCZjKpyqCG2EiQlVXmDztE -RZSAYvs6XVciP6/SxtK9KZUXc/xyXr2zorVNsvd0tbPTVjDRJDhVNTxSZrBd2cza -iRQsko9NMvx3LIz1C2MCMWHanL9enEzZndz+hXoL7syyS4zbhmiyRnNWp7b/qetj -+2XRNw8qaxbsbDCYAy/fjYw2iM6GphX9FL+ytxe4ca6wB375tQjFDskC4PSelpeJ -i7DstmYQm3TRbE+naGwFBr8+DLo/VfFeaUfpxXoY5rDxDUrE3LOY9HvM01rbzBSE -nUvjWQJKYs9LOD1fAy4JOUadWRtleTp0TyzDk4CtJ54HORlWsX+zmAqxR2wSfLS/ -dzFzUKbTRXgaVRYY1BqlRXM/up2xV9jbBS2Z4IUEbPZXo+E0xt9zUgGLrCqoVcKW -BX/ArFwnL2LlOkZZAmI69zwfp19VA2tHlBptMzmm3/Ttw9Vcx0AaQ7s0h0MCAwEA -AQKCAgBd61qd4vKHdn1kzNztzdHg9BDGFA7oU9iYvQlua2HdgDwgLluxhXa7Oyp8 -y9y6nOgXls4dpPuJCxsMWsqGU7DvOxVNAh9lI/4ah8NXPv5wntIG73Q/dL2Ic5Yc -vLRCHFh7klzb1HRlmsXUFmp4/yGgIil+rDlS2MZ5hdTSj3X3ricoCBfI75oHQfB/ -es7s8q1ZxKqxfHSbOUqHdlq7B0zmla8QE8RBdCkvlT5YGsMBjq1RimYfwOBNRgf4 -y8MZbt0Q1WtPeLPH9zdTzWYnDfmjmhqINEsq+PDoeCA4aciQGxjwOCrapgZnwF/q -4q+r8HbgufXjnjGw5ERLt7BsRSYynoJiTWQ3p/wZ2VLpjFtxYxoJ5/qpQvbZMgGS -Yu3FZNC6cnbOs+JWbdm7Kg93N24cBrGdk/KdEE6lz6uQq07FTSqLtPEQWePzBiuA 
-1wfP78b2AH6vyJKq36EfMCJK2i7rpwtNz7d9NI5kiLRDB7gesqC94WJ+psEu+ErO -w9DbTV3xdOPs4FGGrR41Hbo8emrk6smhb8+VK2odggi8i2CLAkYupMsuobBlX3CL -hyJPfWDv1aREJ1w7zWVQlJkvp5zR0oXZXpfFxjpj7Ypbp7BKxmh5+WYj8msFDfaD -8VQ+pqgPpdl6zElEq9m5koHjsHH57fMeJQ59HiWpWFur+kQx4QKCAQEA0Jnvbm7R -WypbPDInkIoPDIhyP9Pqv+wMzNfYEnVEG0GhEU/H5aE20a+Dm6u0bsmPm5lCSQsu -EvylTSL3yumQZMincNIUXcPYb2Qye/ZzJnMIibCqwMKQqi4HxCXprWhiEoGPum8A -fN0bTGgMYfM6JZ/Dh1eGsEvemeW+5tn5xZF4Lfp/vkT8v4FuHDydUF/lIx7F5MMi -VteS0hHnR1DuvxHqtysf0wy2l61LFr7mQCMYTNEyFB3ZfXqpxJmFmCqPbr4PQsIm -2rqIDw+13eeoyDpJJkdi+yzHkAYDOdAsur0vOQvK/Zj1QKz9qmC1O6L4BN5yp265 -vjSE4Orvo7btEQKCAQEA9I/afLw6lHUJ4FVL0p7dH15JSFjt7nmGHocE7Wf6Yp3G -vMp+PdGyoJ2KEQB2unnQZK1gZqUuRQLannjNl7fsIiIhHgHxMBCIiylwSUVnP868 -u9/fpJV/cSGze2zF0WAttIgXKNtXG7xMntcY2k+SAe0qjqX494KT0NGnznySt2nU -A1YlkXm6u3KCOJrBKfbtiHXFoH39sA+ihuPiV7xcETS2ZrFdAX9M422p4yDHqe/0 -dTe18wIxJNiEX4xp/HRE//cuQ5dw/Z/QmNrzgWxHbOmXVR5C90vIJRuYY9xz0tDP -LMnifSKfnG16l2gqg7zb8xsxYqSGndXWKPAeiq3/EwKCAQEAhCWQbWgcjmFFzNuE -/ubG48yoe9DW/OAft8Dg68iH7bBkxd/BpbG8VZeXiw16T1i29f5f5IAFnxeX7EbD -rTLLO1113V3ocwH3YZGa/bbBedETzo4xjc1z8asZVmQiJa1ju4+CKrvZFkDH415i -wcZgxqbwKhQDijl1+g52Ii5iMYuXE6GGPVXcu8DVrWOk0N7+/IGpIeOQJG2KYDPh -TOdzZ22FQKY8EeoS3gF0+SLUIDtbUIaR7/Z86iXD2HzdCemkVaZnaoYuMRBL0ybD -sqDn5nguEObWSII0pgN5Fa3QODhS6xOSc5brfx5X0BBVn0L9VbBJ99GIL3t71jRe -vVrL0QKCAQB+jUYZT+ncUqgWruy6g7yW89pmFqagxb/SYjn5g9m8WDq0DPDAmped -p4f/fkbx/gEJZ/I/i3BjA7QPVyHERcdqblDGz2h4X8XYhUv2jnR8P0XIznNTHo1B -BJh04PeIfgWIqveZC8+KqajYdSQGLDC40Ho6MMahha9p2mPEZRAi2x97zoNIQT6Q -qxOZqPMV/RIzkAYBI9E33w9ST/AbSHw35xgQEe23zaEC+wdzYc4QMPxF/9smcdbu -YyA0tVtO6PefoNAO5/nvNFjkEED7kwVu5X2K7Urn3w4lrZ7w5e4FhEoAukN6T4Va -lAhg+uUtIHiM12B50/tZB4N30bFsP9eDAoIBAHc7ppfpo1aDK3bDr6zTSOU4Mn1l -XrfhBJHDy2Wt9WkvWtcCtXr3sDpthaChueV+mGoKvfgWyzUoauO6HDDsRYriqaQB -cXclVjyy+3atY32Opz9rnWefQkbgTOQ+oQgOzEFhxNS+11Omc6ZZ9s31N6TZi/Yz -rgXzhGrr73DkV6uwiiwkvP8vJxg8AMWKorDIm1myr9wwlK5ogDKSku1DM/y1gvlt -4EA39fqURyqxN9o5Yq+8K1+a/smjGx95M+P8Nke4bMs1+lb7bBXbMaVpC6DLqj8B -eleOZ7adY2mS0CBuf0PNkJRNDwF1B5VDmGBJLubUtGLuUUoEyUbv66WfnUw= +MIIEogIBAAKCAQEAoDx71xRLJqxG0QfDfIZbmUUKedkg35JK/KQA7Ke30ssrNTkb +mB7d6QG3OyUcTWd15iPL90rteQQJAFDRywX4pJpxTlCJl6TyDpu0f3gcPCW0KrUj +x/UTwtAU/qTjzcwplQN402cfNu4r3GwyNg4KslYbKwsecqPMr/7J9U/BOb22ZRVG +p3jGPDWhhjoLuIJ98hafv51AoOMeuf4rKF4jLbji/nLrcBrGA9+Meu28m3PW8+rG +L4XLMMztF9kYiOSULopooebG9SwHSE7hY1v2S3wUG+fE7G4Mp/6tRgpODokrvY7d +NpRq6IS8TZdIpPS9ikuh6nmFz2ez/KHcG67OcQIDAQABAoIBABNXmb9ZtMSjUR0U +adWTRmVW/y+8NQqn1yNuDKqEiF0Kp1mSXjFbsH/a9CpQjX0Oex3fvlRImCfeg9Ok +7d4rB1ufRQQmFqXWhF2dEAm/DvF3v6rUGNCfVdZTVeVzNAh4l6BkPeaO8SapU2QV +L250/XePi1ID0pYWDbRE9k4FZZa5je3mTctn3s1PHp6xxQdyDHfxZmCZImwZcErj +joBoQldvUUfjqXCY9SgRJ/MQSNeJoJvPwXmYokpqxfv2sP+JlQgXEcO3Ihj9IkGx +avMFR3yGdWWLxmE3zzypXvFI+My0E035fEjcObspVOgqxJJUCWLSwWtVAo9shFgO +fPnfv70CgYEAxqVNQ4eEf8HRDN7Ygr9yruqN5NxXKJKBqOT+OlTAiCtrm6iRFkR/ +WOFA3Ewjk5dxnVBvXHhTZoS2yfIVj8Pz7wbcoigfT+ia4JcAW8xQTs1CV/Xz8JsN +ChUH3ee2POue/AAxf25yDBGH3fKq34aqL9WNDmaUz+hDCo4r3/hfVZ8CgYEAzoAv +tBxwE/VUwkmWzv40WI9J4GSh7lYo4d8Z2TR6FRSxgb0Uf3C3GiGKuLf9EMilL3ae +i/Dsb0CVn2sfLdSNFlxj1l8V4R8JfXST2Tn4g1pv6Hs3LEXJtlncg5/1DiMtfrqW +quJtKuv8xO+5rbfqtmMYduf4ELkwg1uJJBc/we8CgYBZkUMrRbl6mXuXIAvjuEsP +j3b3UFqEUrrf2pC+4GQHgfx9LR5uOehpvPcv3azU6Z4y3oe33BFO0lxQ5jTOo/4j +Mqbc/tZPg4QB7FQfEBrNzUMywhWB0Yepmh338nh7M4p1+ehXmwcVZforGzXsn52w +/8sgSSSkMge4hK5HyIfD5QKBgHVr6rROH2UZ8dJwqfKWFgntoKKaVoICOEkH5dje +wDTQiYcuj0NQQq33OLyE0sACd/ufRdRpcOhqHyqBbT9QR9HZQ2QYuYZDcdAGxDOX +hTqb6FqYBe2E2Yh5XKzz/hLF6g7P5vDQxCbN/fO2JS0lEbAYdUbX7PUFeRKYsEj3 
+d2e9AoGAMrejS2Ic64k2I8VyYapEJ1SUaCeNCj7yR67QVtXJWvmYeu9tsUy9bxGC +FmZuEIUnQV5KZUCKG26GKq/0NiT0Umc38zlUSJzDVM9LUHEt5K066RhVEBp3Fds5 +VIGgI1BkHeMKfhve0wwAbFECL+rzC9ihb6uNxZywlfeyfKN6ga8= -----END RSA PRIVATE KEY----- diff --git a/contrib/docker-integration/nginx/ssl/registry-noca+client-cert.pem b/contrib/docker-integration/nginx/ssl/registry-noca+client-cert.pem index ef030f74..06d3824a 100644 --- a/contrib/docker-integration/nginx/ssl/registry-noca+client-cert.pem +++ b/contrib/docker-integration/nginx/ssl/registry-noca+client-cert.pem @@ -1,29 +1,18 @@ -----BEGIN CERTIFICATE----- -MIIE9DCCAt6gAwIBAgIQb58oJ+9SvWUCcYWA+L1oiTALBgkqhkiG9w0BAQswJjER -MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDUyNjIw -NTUwMFoXDTE4MDUxMDIwNTUwMFowEzERMA8GA1UEChMIUXVpY2tUTFMwggIiMA0G -CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDDmOL3EhBm4So3agPMmF0z1+/nPlrE -xoG7x0HYPk5CP3PF3TNVk3ArBPkMzge0/895a4ZEb9j+LUQEjOZa/ZwuLmSjfJSt -9xTXI1ldp8KasyzQZjC33/bUj7FGxGzgbHyJrGGBoH2W5HdswH4WzhCnGTslyiDo -VN4hklJ7gr+Geq3TPf8Eji+1L71MOrUyoNp7BaQBQT/gKxK0nV+ZuSk6eaiu+om7 -slp3x4bc21o7eIMmNXggJP6p9fMDctnioKhAPcm+5ADiFYSjivLeUQ85VkMTpmdU -yvq6ziK3Ls6erD+S3xLvcHYAaeu84qLd7qdPwkHMTQsDpO4vPMIwL8piMzZV+kwL -Bq+5xk5//FwnQH0pSo2Nr4vRn+DITZc3GKyGUJQoOUgAdfGNskTt8GXa4IsHn5iw -zr12vGaxb//GDm0RLHnh7NVbD8xxDHIJq+fJNFb7MdXa8v31PYebkWuaPhYt6HQC -I/D81zwcJIOGfzNITS2ifM5tvMaUXireo4pLC2v2aSY6RrPq1owlB6jGFwGwZSAF -O6rxSqWO1gLfhJLzqcw/NjWnO7nCZEs/iKgAa22K2CtTt3dDMTvSBYKdkRe/FYQC -MCa7MFJSaH85pYRzoDN4IuVpvROrtuQmlI47oZzb64uCPoA4A8AN+k8iysqITsgK -1m8ePPXhbu4YlwIDAQABozUwMzAOBgNVHQ8BAf8EBAMCAKAwEwYDVR0lBAwwCgYI -KwYBBQUHAwIwDAYDVR0TAQH/BAIwADALBgkqhkiG9w0BAQsDggIBALSgrCdEQd3I -vb/FNkNZkAwdjfBD6j7ZtPBwvjEiiyNTx9hOLBGvbey7kr0HtW0KkLWsdRmCc+3z -ev9I5VjDOtpiqrvuAA1wRBaL3UzGyj/eFjPJpvkfJi8zjkIZ2y18QG3yJ6Eqy6dD -0aIQAHl9hkXMOVrf364gf0p7EoOGtSlfQ56yIGDPTFKKiy+Al0S42p17lhI4coz9 -zGXE1/SiNeZgdsk4zHDqhzzBp8foZuSL1sGcIXHkG8RtqZ1WvCyIPYRyIjIKZcXd -JCEM//EbgDzQ7VE/jm+hIlYfPjM7fmUzsfii+bIrp/0HGEU3HN++LsA6eQOwWPa/ -PrxKPP36EVXb72QK8C3lmz6y+CHhuuAm0C1b1qmYVEs4eRE21S8eB2l0KUlfOecf -xZ1LWp1agKt6fGqRgcsR3/qO27l8W7hlbFNPeOTgr6NQQkEMRW5OxbnZ58ULXqr3 -gWh8Na3D4+3j53035UBBQUMmeeFfWCvtr5n0+6BTAi62Cwwu9QQQBM/2f9/9K+B7 -cW0xPYtczm+VwJL6/rDtNN9xPWitxab1dkZp2XcHG3VWtYvE2R2EtEoKvvCLPggx -zcafsZfcD1wlvtQF7YjykGJnMa0SB0GBl9SQtvGc8PkP39yXHqXZhIoo3fp4qm9v -RfbdpOr8p/Ks34ZqQPukFwpM1s/6aicF +MIIC+DCCAeCgAwIBAgIQTCXTJncsLpgueaMqQF6AiTANBgkqhkiG9w0BAQsFADAm +MREwDwYDVQQKEwhRdWlja1RMUzERMA8GA1UEAxMIUXVpY2tUTFMwHhcNMTgwNTIx +MjI1NzI4WhcNMjgwODI2MjI1NzI4WjATMREwDwYDVQQKEwhRdWlja1RMUzCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL0fYn9wE7phMA6CFT6gv7mDpzSB +LkebCxj3LfU/isdgXvtXUn+BKIolvav7oJyTyz1R0NzX5uXxEERMBUW89KWvPLPK +o3d47MWMcAgiYx2+FeGZo1cjq3IRVKyg3WRVw2rO0YNL3K1QCS93A+IdA/05muwt +346XJ2FV0tPmETn6t+So2e9ZXh+uJjcCHq4XpJAJznCwemzzRpDe7nG5sYZqq+Oz +zBQ/bTC8rOdqW5woH/GhQHYHcKf1taPLmDLczVPQCqS3LAEK5EOUElfpQykfkZI4 +clOZBhJ0e5zNEBTB/XRd7uuUA57Ig58l7hbX0fUPHgS9MF1z9CXJ40BSm/sCAwEA +AaM1MDMwDgYDVR0PAQH/BAQDAgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMCMAwGA1Ud +EwEB/wQCMAAwDQYJKoZIhvcNAQELBQADggEBAHKH54KZdpvcLRIJK4yeSqwOigYp +0NHM9U8RlHjmf5Tp9lCtZpVrkfUtg9rXytekAXfd6GaNex7swTMNPnJBGgaQ2vA8 +0jdtKfe6AcHTYQV1rs0qunlR8i26cNhYblKPJjYYA6FBzTTtybXhHYG9xvYpSVpo +XcrsC81DYK6nMiQMRYuT7RO/rtI4Tzx+laYc0lYgBzf6pXUjXycgAuJ5+cWT8DDn +OxPXbfAxfzc6jYfsigwzdOCnuIomFogm8ad47ApTTTLFrVtqCNJAKCu7HufEbB2G +OKWvl9NmTPYetS6MO5LqLAWcf/uRPn+lufHeTfBWIDD5zbJ2+ATP+mQQ2d0= -----END CERTIFICATE----- diff --git a/contrib/docker-integration/nginx/ssl/registry-noca+client-key.pem 
b/contrib/docker-integration/nginx/ssl/registry-noca+client-key.pem index 5aee3ea5..3d03fd1f 100644 --- a/contrib/docker-integration/nginx/ssl/registry-noca+client-key.pem +++ b/contrib/docker-integration/nginx/ssl/registry-noca+client-key.pem @@ -1,51 +1,27 @@ -----BEGIN RSA PRIVATE KEY----- -MIIJKQIBAAKCAgEAw5ji9xIQZuEqN2oDzJhdM9fv5z5axMaBu8dB2D5OQj9zxd0z -VZNwKwT5DM4HtP/PeWuGRG/Y/i1EBIzmWv2cLi5ko3yUrfcU1yNZXafCmrMs0GYw -t9/21I+xRsRs4Gx8iaxhgaB9luR3bMB+Fs4Qpxk7Jcog6FTeIZJSe4K/hnqt0z3/ -BI4vtS+9TDq1MqDaewWkAUE/4CsStJ1fmbkpOnmorvqJu7Jad8eG3NtaO3iDJjV4 -ICT+qfXzA3LZ4qCoQD3JvuQA4hWEo4ry3lEPOVZDE6ZnVMr6us4ity7Onqw/kt8S -73B2AGnrvOKi3e6nT8JBzE0LA6TuLzzCMC/KYjM2VfpMCwavucZOf/xcJ0B9KUqN -ja+L0Z/gyE2XNxishlCUKDlIAHXxjbJE7fBl2uCLB5+YsM69drxmsW//xg5tESx5 -4ezVWw/McQxyCavnyTRW+zHV2vL99T2Hm5Frmj4WLeh0AiPw/Nc8HCSDhn8zSE0t -onzObbzGlF4q3qOKSwtr9mkmOkaz6taMJQeoxhcBsGUgBTuq8UqljtYC34SS86nM -PzY1pzu5wmRLP4ioAGttitgrU7d3QzE70gWCnZEXvxWEAjAmuzBSUmh/OaWEc6Az -eCLlab0Tq7bkJpSOO6Gc2+uLgj6AOAPADfpPIsrKiE7ICtZvHjz14W7uGJcCAwEA -AQKCAgBmIvmxpp8l+cH/ub5OIenZXpMJn4fqZPXtxjjd4HshIN0ln0JlF15lOG2M -gDGKFGKUts8gAX/ACocQETtgnDnn65XlwPIqfXFGflD2FNoLyjBGinY6LhtIF9is -aXmpHz1Q7tDjzZiHKLor8cBlzCjp+MToEMpqR5bO1Qd5M2cro/gM7Lyz9kN3S3x/ -x9BCpbgwsVtYxGfEePmFkwAO159tx4WMCYvOlW2kSm5j+a7+iwmA9D7MGkVZHvNN -A7Y/H0F8ekdVBN5pMG9Yrv/vk0ht2lugcS5YGr4eufFq0mhWdv+jhBTxLzqPMMBG -m9oMJcj8XyXYtwpfVsqBpCqK2wnEnv4Kf0rZzBU706nI2mjPXx3dL+5qo8uQJKNp -mxoS7vmHV5RIJgtdvyzGFHjdfu1leowhV+Jy9jWzMw4wlnmlxsfDECf5RoSf2XGt -SMGJb0dbJKae+W4MfNUFsgAWMZk3h3KF8AHHe44OpDbQeoh3JLnkWSG0oS3CR0ch -68TzCy0SZZEZ9IS+I6o5WVpwWfReCQ5NjaKipWcpiJvxg+Dc3GG3QcVXVz2gGrJh -g9v0v6eyeOJ32QGvvP7THFBjpWeeHlXT8Yz6hFcPrvErEZ029TEmhg8aLWBGfsR5 -F1bazdbqvOSEB9vBAAaddNnEDG9Rl8EmC4WdsnVgYUw1J7gfQQKCAQEA9DKjD9eN -CrUl/2YfSm2WaFhYci74XcHDVeAXN2SbOyKbMIqk3aOFQNRAsLRnwPkdiLtuqeDK -BafrfLTCORHfFdYKnUzmuekESNLckN9VyLztgqOqNAv3LD6GmSHBaJEnUyniLxOL -k0wMEBIsEQw7Fb4blM2REYJ3ZzMFmgpRGnIX8KcxhW9XgSrnqMLO0w6mVxjo7xzd -813nCcNrGhySM/EzKYtTNHy2JZmMH5QFHaIj67KklO7VeEZX5U+TKveBEt4rmHqs -Ndqf/djSs8vu1xse82pVRxMXX2mhDLmwjUjPgWYxUL92jTiyJhE7GxpVB/yHgF1J -Ecb47MDahoNKkQKCAQEAzQzvCOA77IQpGO117GcMqcjzwEUhTytojFBT+s5mHfzk -dYr5TyN86LQ7/GktNoJ5oRvD9UGRSul1OGneivqtWj6mv6/Zvfzacx8NXY4MYFs1 -nEr3Gr7orVFIzD2x7nMPG2G6+J6hZ1rhpnZ9Hprf5G41sHIJxHJ9wTYSUAmFh8bv -FiJqF90bSq/E5hgjphtX6wZWeZYspzc/5+IrJ/I0nqoxV3rjUy234zlzKJAV10sV -5oVgxLLQsUujkHp/Da+ij2aTv1Za8y3PTJ7MAHYgdpa5l/4U9MnPUEB2REBCI1NN -TqxnViwD0xgsvxfb79UzruLJIYOCKvfOumlutXM0pwKCAQBUIMXQhWAP2kyW6mXJ -TGvO0vDVlZz3H/Pdt/AHo19fRhLU7E7UFKupo/YNanl8H9au7nO3jrvKqwkT02o+ -IwwKB81sV7v9PGu/cvWN64MwPvZMVXojqCOlWH0icGCjV66Glh1YPpGNU1ushbYs -wVvxp6b04sUhlSLxqMA7S2aZh8j7nX4QDEXHODLLDyIV0Cw6QViuV/GXEDiyQmK5 -gjJUNrp7i4ZExNozpeyCTIpepSde4hKVRJrCbumFFJ8M5GvRRj0asNh3TTRlTbd5 -Pb6w2KUXEwECFW+t7UQQkEBkzDrAx6YhvXRoPqoRN0p3keDNeZBtBrZPq47CccZX -JRAhAoIBAQCJ/DgnGu54XP9i/PksGrSU1Nvi+SJPKoDyW2QIFTj22SXMS7c1oEYA -OrlbRFPeqLK8zfhyZKsnZC8zxVqy37okTqDbwbSfezZt3emamWqOtRJAmNnsr6fY -aii4+JNySQ9Td9LgV69549iRso7EN6iPCfMrR7J29izWBlMQdTfchOyDUqleYbZp -7hpsVLY4o5HoYJ10uLBX3oAsxTARc5YhZ5pIqjOr18o1KIXsN/napXaZaAwUkdiK -VsI9CZHSXezg30Bxs+UEXEFx6DKT5Oo3o3pFZAAqMlxGPvrXNv7K0tXlKXNos7nn -Jg+GkMG6hRiAibCb0umXjKcbHrQXeu1lAoIBAQDcRBsy6cSQXMSu6+PyroH+2DvR -4fuiMfSrUNjv+9K8gtjYLetrZUvRuFT3A/KzDrALKyTFTGJk3YlpTaC5iNKd+QK8 -6RBJRYeYV16fpX/2ak/8MgfB2gdW//pE0eFjw+qakcUXmo957m7dUXbOrw1VNAET -LVBeVnml+2FUj0sTXGwHKcINPR78PWZ8i1ka9DptnKLBNeA+x+OMkCA88RJJegSk -/rgDDV52z4fJHQJh9TZ7zLAXxGgDFYLGPTrdeT+D/owuPXF+SCP4pMtVnwbQgH9G -dfQ9bb7G14vAeu/kEkFdGFEreS09BOTRbTfzFjFdDvSV4JyOXe9i/sUDxf9R 
+MIIEowIBAAKCAQEAvR9if3ATumEwDoIVPqC/uYOnNIEuR5sLGPct9T+Kx2Be+1dS +f4EoiiW9q/ugnJPLPVHQ3Nfm5fEQREwFRbz0pa88s8qjd3jsxYxwCCJjHb4V4Zmj +VyOrchFUrKDdZFXDas7Rg0vcrVAJL3cD4h0D/Tma7C3fjpcnYVXS0+YROfq35KjZ +71leH64mNwIerhekkAnOcLB6bPNGkN7ucbmxhmqr47PMFD9tMLys52pbnCgf8aFA +dgdwp/W1o8uYMtzNU9AKpLcsAQrkQ5QSV+lDKR+RkjhyU5kGEnR7nM0QFMH9dF3u +65QDnsiDnyXuFtfR9Q8eBL0wXXP0JcnjQFKb+wIDAQABAoIBAGQFxk1KFFT9c7Io +oF3IHL5b38HIFJbwbBUfHaJYoehCktlxXINs5ujxfvgHk/FbxSDANaunUEoKjaTh +Y+R3RBigroUURhI41VjBprrWnP8s+lufqyC6D8G7YsIOLikTps/FZE+Bfsv2yXTe +CCK9X8+8eLAyrsq2LLCw+Fjzk+bKRj+zE1bUR2MqNYtRNOFizDR0DCy/f+OltmhR +MVTQgA4hAWyCXc3c07zJ3YMiVMHBIGX3hiwEGhzgKtS8vQ7isW21StGLsMQlvUgt +AjrVzzsacCSzuL+QZoZtZl3E7V/Mko0bKNeOz2ouoWTKxInlzget+b+zE39+1WZO +T/X54gkCgYEAx5sI73letGuk9DOopwKLokj0Qdj3f5VRb3yJqbp3YkLTeayyRAwD +3KY+NwSDGLqj/IcG5DN/ZtLbbhiI2F3oPcJG8QyVqmsfzF7aW3RaBBt6gFN6IdQ9 +SO0pS28bj3PVLqPqx3gXHZ3l9WRgj5mbl6yvoICiymMMKajOgKi0sTcCgYEA8o4j ++0HFhxcLvPz8GCynSarMXaZe/mEImURq8ObH2KSgBogD5mCA3IHL4kQSiRyxNoAt +crGr1idsR28UYfX4xprMp3okA9ujAw0hkiNhUh3jf3ZywvQXFkOoSbtwnfAFK83c +CmHy+c4OL9BAXsHvhsRHDCVjfKupqJQwux+9HV0CgYB+FSMmyX6V7qzqiDsPC5+S +Kg0IDvn/QB2Jk5wNdzhz/AxC/mA4dXJ3DRedfx8kHrj5CX3D5feixqxOtfay3VaW +tEJFfxKG7FXQrVW2kR9PGuBdcN1jwwHXL992w78f9SYC6Q2jY+sODTA1umr4KipL +O4xQkRDDUJ9dLUELqgVBLwKBgQC+/CLizQgOdZv9hCmvk0FppP3j44M6wwa1QAUA +iIblU8LZQbHobSYp+l2iXL1HjvsOkeC3RaSrLEF7AcDH3Zi0MOFiIa9IBmIVnfpI +Cmmv8e7Wx1pXnUCsfDt/SwLCqWI4+o/+8N8TySasiUqWEhhbQiM7Mhli6fvdzEmO +ndAX1QKBgCKJA25iPkLKw4mFVxAaPIAZnenJXJpuHF9tGzjjcFfioGtvI/1mrePs +PhwoO1qpjzY9brtf47l+vVMSY9KrA1LvudPvTqBtyjQvG5SqsWZSLuyJL30HKeFy +hv9FCsGVcF6wu3S8wXaGC/H8kityxTqFgZQW5whl2D9axJavygKj -----END RSA PRIVATE KEY----- diff --git a/contrib/docker-integration/nginx/ssl/registry-noca+localhost-cert.pem b/contrib/docker-integration/nginx/ssl/registry-noca+localhost-cert.pem index 53ce0742..e753d93b 100644 --- a/contrib/docker-integration/nginx/ssl/registry-noca+localhost-cert.pem +++ b/contrib/docker-integration/nginx/ssl/registry-noca+localhost-cert.pem @@ -1,29 +1,19 @@ -----BEGIN CERTIFICATE----- -MIIFCTCCAvOgAwIBAgIQPjclBRGzhznCybQzYRQTyjALBgkqhkiG9w0BAQswJjER -MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDUyNjIw -NTQ1NloXDTE4MDUxMDIwNTQ1NlowJzERMA8GA1UEChMIUXVpY2tUTFMxEjAQBgNV -BAMTCWxvY2FsaG9zdDCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALBe -C9O6es+mStDowUd1kiM59VkinzzdHgE24LvKmGxQ6fDnnT8S9L7iyzoxcJWlvSHu -pfyZWvij0ZIyRZ288XemTEFYq25RK0IBGGdvYz9OqT2R3lblBQrXDjSi9WG16sGx -60MGhM2egGMqFQ5DBfT16IKw00+RjFgCVzJ8T64Lzw82E0e7d6hl39SPybY+uvrt -SID60hYGmXoOdaiC9qquivks67BZprGNfORrvyJNrCFI6oKUFWHrQ1PpGd2tOwJN -1P3gkkS8pVlAif6ZQkAf+zuKu+l4j5tKxGlJAkJsafVJDLOxBKutUj5msha0g6uJ -gFXUe0+G8hkNcEjd8XqUUCwIOY3pdv4WsydKBk3uH9zMnYolw53k1q0ObvoY1NXf -beMxHQAtDi7nfQGlae9cuuOSymy95WuvzfhZFKdPWUe8lKN9QXFIWVoCFnOm8T3P -+FNCUE+p8DIWkal6Ul9THi/Kz4p7twyrUp1LwT5EtSaJ3iGAmB9I+8/1vmZT3lPi -nX8P+iVGM5yOUnptrsFm0bUcJWRD6iaTK1KxpH+Is4h2kiUiSz1tC/9bKaJYN2o9 -oy7q7+ZVfHSmIxLo8ZFYsaZBcXi96cKuuPMR3X4ISPwKDqP5irxU/QbI+YQBMshg -G4b0BNoMZ50g30r3Hcsifw4pzPQF0RDMOBeCiOi3AgMBAAGjNjA0MA4GA1UdDwEB -/wQEAwIAoDAMBgNVHRMBAf8EAjAAMBQGA1UdEQQNMAuCCWxvY2FsaG9zdDALBgkq -hkiG9w0BAQsDggIBAFuS/VrMNUwEMyUIktDyna5ExYh/FDOE+YEYf8tsX7dSMhRK -wE560/AcVZcbKKAZOnZ/262a++8tparsQt+bXBJ2so6YUqsFDNdOLCI2aShjWDRe -TNhqmLIO3FNsLRKp96WHVz+jFoiECsoYfKn0jgqTqxx+7nWFqgBaNSlF5cbCgLCH -jQV1uQhzsw/Mh/32hXAidkv/nLeLf7FbKq08hgthtoP+XstlzZ5BxkPodjb8XWXG -DSS49SWX971GHa1apwMKfxVGSppxn18ZwEmW1BUfQBNxtMytqA9DK3+xuoUdXkB0 -iJbm3Jc10JSRju8iyL121Xt6f8O33paVz/ndDJIWztUOjnItc89rxHsINPt5+cUt 
-jix8ohwmHGDrK7ZooXBvotvmGT/xhPr2eHUAG8JuSJ/Cr09UUOwUEigz4CfgJOHm -XukdzjOkb4r7lhNmVeGqrjRol1W0Wsc1NGH++J6xdkIeQ+i23kHwFHfQWV/J69tm -rOn2N+qijtmbIy9YfVcrFDtUtEAzXylZ2StCVQNofd0M7tXNdrUL8yAFwlrhWGJV -wsSP++1xH2Ie6Diupy8z6rbP383HmnmVPU/UecgLrlX2lEpt/UZkkX1Xm+6PhrrT -HDeeULvqtUP3PD8wS0C873Pl9GXOKISqf0HKEIDUAVZhQOsGFqiZH0388M4L +MIIDDjCCAfagAwIBAgIRAI0Dt8LVd8cJPc0dv5aW+wcwDQYJKoZIhvcNAQELBQAw +JjERMA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE4MDUy +MTIyNTcyN1oXDTI4MDgyNjIyNTcyN1owJzERMA8GA1UEChMIUXVpY2tUTFMxEjAQ +BgNVBAMTCWxvY2FsaG9zdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ANr32CUXFUCW1c2oPoHjq76T8jUTH/cxPiR5NabJ1y4gMCBko2rIe+TblW9UclxH +911gjfpSAxFtNf+lX5kwmAMHhU8pcCc+Mjp3Ax9acFXSXvzzTDg+xj0NGig6OBk3 +jyPuO92lM8A4qs0mBZ/T04iLkawLmdRXViRoGK/T7Df8HN+hm7UsG0VO3GxFgSST +YhhKTu6JMTADszbIFPOvBxGCUNhffXiLNyviO4AiBdcAv2v0SUadEPmSGm5Jb1DK +tfKY0jWi1k1zNSqzit/bhML/EHbVkYJ00QmH50MBTunpz60gIgHjt48nzJarLDML +oRFMppG9XIBQlUn3lo0gVwcCAwEAAaM2MDQwDgYDVR0PAQH/BAQDAgWgMAwGA1Ud +EwEB/wQCMAAwFAYDVR0RBA0wC4IJbG9jYWxob3N0MA0GCSqGSIb3DQEBCwUAA4IB +AQAb388owui+O9vUle+A99FXwcMDEb0OILc0lBXVWx8q5ZE73vcanxyAcfOsZYRY +Lh7G6VtJwC9xVjAdNwJ1gd+ak1l0/Rhs1V0JZ12/wOvAOQ7+9g2lRc1IedOh3EIh +d3BMI4RdDB/BnnK3XjkggYQZK3yiAOavmmsZxAOl/apzjF+5u8XjuydMmotE2NYw +IpM93zE5wWXqzYs/Kmyy7zAcHKfvq9xej/gMCFEvO6lopmwyslBLPpPNHwyfIVtA +mspm2OZhdmpRJYGzkR4wK5NjoRl2O11uzlMRDckp0GSZ0x6TGxmb7ot5HK27p3ep +6LPZM1wJIwuYHIP74eH0ctQP -----END CERTIFICATE----- diff --git a/contrib/docker-integration/nginx/ssl/registry-noca+localhost-key.pem b/contrib/docker-integration/nginx/ssl/registry-noca+localhost-key.pem index 636dc828..e8c51865 100644 --- a/contrib/docker-integration/nginx/ssl/registry-noca+localhost-key.pem +++ b/contrib/docker-integration/nginx/ssl/registry-noca+localhost-key.pem @@ -1,51 +1,27 @@ -----BEGIN RSA PRIVATE KEY----- -MIIJKgIBAAKCAgEAsF4L07p6z6ZK0OjBR3WSIzn1WSKfPN0eATbgu8qYbFDp8Oed -PxL0vuLLOjFwlaW9Ie6l/Jla+KPRkjJFnbzxd6ZMQVirblErQgEYZ29jP06pPZHe -VuUFCtcONKL1YbXqwbHrQwaEzZ6AYyoVDkMF9PXogrDTT5GMWAJXMnxPrgvPDzYT -R7t3qGXf1I/Jtj66+u1IgPrSFgaZeg51qIL2qq6K+SzrsFmmsY185Gu/Ik2sIUjq -gpQVYetDU+kZ3a07Ak3U/eCSRLylWUCJ/plCQB/7O4q76XiPm0rEaUkCQmxp9UkM -s7EEq61SPmayFrSDq4mAVdR7T4byGQ1wSN3xepRQLAg5jel2/hazJ0oGTe4f3Myd -iiXDneTWrQ5u+hjU1d9t4zEdAC0OLud9AaVp71y645LKbL3la6/N+FkUp09ZR7yU -o31BcUhZWgIWc6bxPc/4U0JQT6nwMhaRqXpSX1MeL8rPinu3DKtSnUvBPkS1Jone -IYCYH0j7z/W+ZlPeU+Kdfw/6JUYznI5Sem2uwWbRtRwlZEPqJpMrUrGkf4iziHaS -JSJLPW0L/1spolg3aj2jLurv5lV8dKYjEujxkVixpkFxeL3pwq648xHdfghI/AoO -o/mKvFT9Bsj5hAEyyGAbhvQE2gxnnSDfSvcdyyJ/DinM9AXREMw4F4KI6LcCAwEA -AQKCAgEAnrHg/oD7ZMEC7PuifoRCHMRYCf5nPkLQbtNMYG2pvT0JY6VlDo4l/2Te -7NvzrBPYHSI55RKwkq4FMwFdNtP+imTulJYOm1MaE2gc52WI7jv/eNE6OQIWCWz8 -8Uv4dBVWyTcos8S31rTaXWBOVejlAUgMERy+5wfWOpLQlzLYF4m0pMFJk/AReUtB -nmhLXlsPsB22cag/RWZmzzcXk6tT/LzVe+R5ptLkdTsUuAxjjaBKVCDiMuDAZL1m -dah3h8oKIMab8l0SABumxKqYAKkyvbSJQUhSUYAT5+3c0cfJ6q7WoMk8TqvnwfpQ -2Klbcaa4G6+79H8e/a41RWmcMVTTpLKmwzx/iMLPswLnTFbWYCsLSsml3OpmXPhG -CKdbIWMvNMBfahZmnCP2pNcZBVY1/k/lEw25ehtnWqA7HplawT6V3gk/Bzz+3e3R -XEpioZF70ipdW5Pb3OG/tKSNDvRRjqLPk9UWlQzmedzu7XN28V/blw/CBVcMAcc0 -njwAledTuqv/wQ67dtbXdcxSPZbV/Rq7y3OmpgK6RWLIFzzpOPW5gULqUZfrnxtv -StxVnlZXhFoymodFobTi7AYibsLaXLkunZWXEwFwdtLfFHznfHq/rHfBmna1lcKW -MgWRqsbaoCsqHC1nc0E4llFkn3zqGYgMQNBeqNfX6cIPI/eQzPECggEBAOk0TP8N -edIFENOrzUtpH1fB3k15heeA84SeBhj8t/xrphR3o+IVO/GtMtq9hVLeYFVPwWCi -Mmy4KhwNUOtFeCSX4MbpiXvoPEjL3QF+Sv95HsEWsT1iBQIN4aoV0ZSv48YsRczs -tLjr96hADLTMfpCwyRq9r8XVF/hnx7vqOoOC/J1kteRhjOWRnutFpdAMfkFgzUa9 -1unmDHsDifcT+vpxief9Q9zK9xMYvYmwFkBUjOlhC7WchZC20nrwvM+A2mMBpeLB 
-WSRWsYeOqW8zcQNGdWuXXMKxsYHwv9tXbANVWxs1gz4x7BxcFoN5poIFrnT+eImY -EwhGrKR6jZsKF00CggEBAMGbdZU0+yvxL2tAul5RGAqv9xhdUV4eg8warTQ8/RWt -8Vef2wllBYnP48rXNDovb7ZNOjMBdjIWZ2zq2McMtHqpzP+zWQWaNT8/7Zi24JTL -y4G75kZdGgTPG2Y71seZoZGxfOu4gf7cLKOqxiHYrNDHEDl5Pi13tJD/8qf6hYm6 -K3yALSv+QlM3mk+5oueKQ7Lj9rV81YomYSV5+K+WhszhvLmuxv0necOLKapeBWvL -GQ5038yAq3PFdu0HXzyA6L8YdusP1d3sqwQvLbi8KAMXJCeT6WZXGYgX2Rjfbuih -ZHUaE7Ac0EsJfMuOowSkS7oXuT81k64ngCoq5KZC5hMCggEBAKYkt9JiZG8HYuSb -GsjmHQllup5RvN+hVF0gRFHbAq2YeBtO3Xg+DpXxAjErIuhWPCWri6bwB6LDVmTj -68milaTke6TbTzLy0rg+Xbcppf766LlCFIYZ5l1/TE3j+4vGAC347sW/wkWY/7lj -4GmS43zsJmqhx6/XUJuOPJOZnZSCZr0vuhL6mOoZZDJUTXy62dx0PetvZsT/O9cM -P2fDWWTCLTEVlBqik4KMdsS4qjGsyzOeCzyZReNDDRO/nZTsRSqSSwARJhQom5Rr -RDVQXeyqbw93KAQhmshroBSB5Rc+4YiyCE3wPTo7NWL38XPi3lbF0VSd/rk/uNH5 -6hcSCmUCggEAIPHjQFCTrRaNiyKolAQYozjuQyceAXYP11tyvcDjEB1ZRB/flemq -15iYmpukN4J67/qUPLmy8zL8xnvwB28SBw195MUQEPP8u5aVR7dW3/sN1jWzKaYO -F2Nmti7YjX6HD9Oz/iiXdlbhAbi9nmTQg3ZcPGt1OSd1gncLQ6pNrvIPFFB7X1EU -2DRN/eMI5X2Rp49DG/7yF2AQh+AJgVeL+LEw/CfRlKJzBeNYY7U8Fuuoh907eAEt -K7YeVpc6jYEiGeJ/2eAH9IuhTkT48saRyHTXoiR5QwDvR0lHmAPtS4irH4Igd4dv -qlUi90B+XPvYJwKCc08aojf2hzZlUiVwIQKCAQEAraCoWea8hLFchxmAiBt7joIg -nNK7a3LOHYxT1gB9H+PoVqTmzGVTeZpD8Jnis/UHmDhRYuUGqvFIefjAWbz0jJAN -t6RMAozENCG1PoeXHf1gt2wspv14kza+8jSdpzNrzZgPZdb7Wh1UEqUkiRYwn87f -C7DHknqCj9S2qq0DFXYz15JNPVrbvD+ZLBFJhTAjppS9TuYQVLf8JPYHpLRio/9A -dMsyOz1VA2RRYN0u/u4ccxiN45K3PbVMCeDPbWXNm8G75YKQ7LnIuehMB1qkZy6N -MOnNGp3l/ZkFK0JsW/pZqTQ2FqSkb0+ttTFApFI3qB04sc4s0uKPI9fa0OQtUw== +MIIEpQIBAAKCAQEA2vfYJRcVQJbVzag+geOrvpPyNRMf9zE+JHk1psnXLiAwIGSj +ash75NuVb1RyXEf3XWCN+lIDEW01/6VfmTCYAweFTylwJz4yOncDH1pwVdJe/PNM +OD7GPQ0aKDo4GTePI+473aUzwDiqzSYFn9PTiIuRrAuZ1FdWJGgYr9PsN/wc36Gb +tSwbRU7cbEWBJJNiGEpO7okxMAOzNsgU868HEYJQ2F99eIs3K+I7gCIF1wC/a/RJ +Rp0Q+ZIabklvUMq18pjSNaLWTXM1KrOK39uEwv8QdtWRgnTRCYfnQwFO6enPrSAi +AeO3jyfMlqssMwuhEUymkb1cgFCVSfeWjSBXBwIDAQABAoIBAGQMCf4oZdV1FYs5 +7BV86OPSxT/q1Rgkr7gKibEDWAYDPvoOAXywzarriYOsmfQADc3kZ/qPrkcwFxQP +g3aC9XGs5gQdctj7WgfMiOiycdFEpZH9uD2asQkEC4eF0kvzTrukBkZnTRXuzlud +m8RDDMu+uXhadJbIsNtBlMYBllSdS+LFxXcAYm+IsvTYzmwg4+bnjvOwMHO9SMSb +1dfgOLkg/A++/GTjD/kUyCV5dc4lv2I0i2pXJkD2V0Dr6Yra1U/MRKcOwTGC2q/8 +hZuKm9DgvGXvZsG0+yT5fsexGRwTxmByvfj+QMF3LCTDCknD4d/mmEEX0EEGPlW2 +I7OgKEECgYEA/LkdwnXy7ymis1Rgjumc3ydcLoCqV3ExaxXrvO50EkRpgRX/TLEk +j98iVYyksiaJuMhqnxNttT6GwWJvwIXFPP9WpIGmzi4GKyqYGEX4WbyPoY9hjt/G +muR67cTXg6ssiSssUCoQnWIHyuGDJfzRWqnoei0dIA2GobOwFJtXeV0CgYEA3c6u +utbNtmbyp17Jffx01ee8Wprhnoz7Nh/dJMLngpIx3i8qQqpFB8TPNUTu+GLgGcol +n9BDzZszoVhsxybn7Lgm/OjS/jQL4hosFoqztThkg28L8UD7QB0TyCucwgk2lgOe +VxyX25kNSXzxdCYaKr1+6g2gtBAb0zPj2E+5t7MCgYEAimoA6J6dHWwaVkmiUOOW +LYprLHT/1sCCJnptEJ8xJ0gc2LxphWGH+txk+6H6GjCNQY1TCCkl7xx9xbDaMAGU +E2Jt28++wjHm4wGDJ9g6uztRF1VmQ1BAgFkfEta6irzXuZDRxl4jl283gWCd6dJb +/2ILl87ZotKFqE6347Fo6WkCgYEAyDNyMMALIzTelkUO1wFUL3If5yPeuy4C3IJ8 +J18oeQkdq66klVF8RxvT7v/ONjGAlqaHuSzQ1jbcrifS3xp1wYsh3asELl+pziXT +X3FH7Sz+REep3tLJNMBKB6WdsuF//H09oOD1DEej342/nhd6DNPHRtiQEZZslwBC +Cg9D0NMCgYEArNksPSQJSxXqxZsw17OTqQJnf3kNBI0SP9q6Wc8gN69r5YQcIHcr +KgtfdiL4LawZFie6gcNu398ng7VYUzzkYR9j+G5qPetcqllQZeVc6cieUyR7Eul0 +WvtlUECCfweLFUsIhuHyEsGu1PrFYd98SlOzt24utguFss1539cEC3A= -----END RSA PRIVATE KEY----- diff --git a/contrib/docker-integration/nginx/ssl/registry-noca+localregistry-cert.pem b/contrib/docker-integration/nginx/ssl/registry-noca+localregistry-cert.pem index 08c393d1..9aeeeffd 100644 --- a/contrib/docker-integration/nginx/ssl/registry-noca+localregistry-cert.pem +++ b/contrib/docker-integration/nginx/ssl/registry-noca+localregistry-cert.pem @@ -1,30 
+1,19 @@ -----BEGIN CERTIFICATE----- -MIIFETCCAvugAwIBAgIQCnqSQalw9ytL5bHLgHZe+jALBgkqhkiG9w0BAQswJjER -MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDUyNjIw -NTQ1OFoXDTE4MDUxMDIwNTQ1OFowKzERMA8GA1UEChMIUXVpY2tUTFMxFjAUBgNV -BAMTDWxvY2FscmVnaXN0cnkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC -AQC9gvT3cwz0Ih9+7Ilv5lc15HsEiSmEMh4nOMZrSaamKgf/ydCiGo3DQapr/XDK -FHMLKq68AxwfOlzmEFQ4d9umpPMQ2+4GBr0VG23ppGtQApIPHgD06S0/CeHmDIXN -FXcKybPX/9KbgNkXBWbbJkJy0EcsdP8VJD50Q2WH89nvgEYJNFuKEELD3iGY6bBF -jeDTle5jYA7CgBKvD2avn31g24Qhxn8n8/BdYO/U0kw0qmoy1veLOjCAW0os0jkM -NlKrFpyHEWNj5B3X6UgSn8EGQaVbDq17PrQwlHJYU4nih0TnD1OwvBnFnd27pXjr -68eGA6Zc2BbUnhNGhppWHZ46LpPxpIbafSOH3ES3N/MZAfcUKIUntLlWE2xCQgFV -TW95WeVtP/r1aWgIHu0E2Jb2eHCE+qXYqJxSU7S4DcknmmcTS69hzyHs+92Ec+7Q -m0aQFZ0dyPoYPwXMgZpTAIuXEGg/FKC1fiS/deTW37DyvB2jppehKW3RJY3uso7R -o9vs6DJx1OdU5XEq9R3n7op61N7PK8Wxmn7TVYHEZHkITVvtucZZd1FNTOrOJaNJ -UnE+FuPK1Mrff+jz666Ru4zQL0CondOamX3QR5tuNK6MTqFs87wKY25qsqz7cS27 -kHW+r7UNWbJY3/UQhaPZM78zCZa2IL1nBFUjsFvEA4rtYwIDAQABozowODAOBgNV -HQ8BAf8EBAMCAKAwDAYDVR0TAQH/BAIwADAYBgNVHREEETAPgg1sb2NhbHJlZ2lz -dHJ5MAsGCSqGSIb3DQEBCwOCAgEAHVGMyoyX4lRzWCDkUjrXkrDZzuv03M2ojW2Q -UL61ejMkTWQW8R4gKrcPHAOJAPKVfGEVOrQH3ZMyxV2HnWrJ7egrn65zOzmLbWSh -O7gdpL6YYjBr218fqJn/8HadXZa4k70JyympYOLojeWSLy3KP03U+y7AMcdE1uG6 -6HJI54ZjBoW/nEyWmMh/mfMz8EN+Mgek48Z9AVaOswbtHtDIXN7XO0jbB3DbY5Yh -prVqVLYAz4sCchGTadj+aEChF5sJkKREDvAew/njC0WGS2TmMJ+V1uVhXV6354mr -edk79YvdwzwDgeYArkprahMtn9eu1aSTfUXsmM5OP5tR4gyFV1kUmTPY1yUd/yO+ -638wV0mWtGbbf6j8dUKeUBCyt2qGg8J80OUeFdvdHMswtaUq951NApX44BinPkbK -moBVQByZ5OEcmMidFC9SqYSUwTQ7uNyWeguhCXav+l3x900YlKnUQgRUZntPwXjs -yc7MXv0j0E86Gme6G1O02zamwkRgr3qOTHu2oQOow/a24fM4HASayLR0Kegt0sh3 -rzk0HRF1mGonf1Ecyyj/3LpHVsgYSckwtJoZLOqtDMn+CKtOCEByssQfD+E9Qe07 -qMyvcwpXUpfqe3ZERbJ10m98Z88VeK/XGt9ptq7HY47n1KL6lx3oyXwZIw8pq928 -89dcqL0= +MIIDFTCCAf2gAwIBAgIQTyBNJlm7fS0yutwdLbhG9zANBgkqhkiG9w0BAQsFADAm +MREwDwYDVQQKEwhRdWlja1RMUzERMA8GA1UEAxMIUXVpY2tUTFMwHhcNMTgwNTIx +MjI1NzI4WhcNMjgwODI2MjI1NzI4WjArMREwDwYDVQQKEwhRdWlja1RMUzEWMBQG +A1UEAxMNbG9jYWxyZWdpc3RyeTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBANSMT7auGdwF63fFdQM9O/EqrX++gnuBQgFa4cZzC7GqsvS90uKTOLuWIA2U +ehgF548EDkZu1z6nRAvoFh5L6B5f1VjiVknzLEPlR+5uPD22kbcxgCrMCRZn+5mK +vJhTUpx18yeBXMhxtPhkGnKaKwGcgeW8O69KM7Mo4HBQqg5656pa+4wkUo7GX2v0 +R4ZqmrS1tlwOgpld8KZKVJ1FNyGEeKQkIYGJKHqgC2/JrXsbzuSZ/4pqf8BHc6Mb +AHU85RlBFVDHFPMtQ7Rg1vrhYzgInKeqXtc2kEAe63nqyYyHxPOUd3vIQX/N4tdB +aH41ffs68Pdtp9GeocTiYyj7KuUCAwEAAaM6MDgwDgYDVR0PAQH/BAQDAgWgMAwG +A1UdEwEB/wQCMAAwGAYDVR0RBBEwD4INbG9jYWxyZWdpc3RyeTANBgkqhkiG9w0B +AQsFAAOCAQEAkjfZvcd5WysbfqGfhPErG7ADWAFJ1bsIDlHVUaEn2/Asr68iJpfF +fqb0fhBkBExPhiLDS+jmL1L86QRNIgyM+7zGCCagKJkl9uNBGXPdS6KxZtY8W8rV +bF/GIYnYUL5pnyrhX4pH2ZnDJpKIAJl8CAZ1VHwErQ5VqnJAX/gGO/eKgiyCciZv +WmmQkhcOo60FwLW+Wi9sLOYD+YAT+VnFrGfak/SDfT78wrmmfg5v05tvFXgJaZLh +JSxRET9D5iT3DIxb+m5GyQAqIH1djh02ybrPJ9j6/+qRQDojIe5qJUL90qIvhwO+ +aSbIL/p+I6//AUMWJvcR7GbXy3xywgmaYw== -----END CERTIFICATE----- diff --git a/contrib/docker-integration/nginx/ssl/registry-noca+localregistry-key.pem b/contrib/docker-integration/nginx/ssl/registry-noca+localregistry-key.pem index 205511af..4b6894ee 100644 --- a/contrib/docker-integration/nginx/ssl/registry-noca+localregistry-key.pem +++ b/contrib/docker-integration/nginx/ssl/registry-noca+localregistry-key.pem @@ -1,51 +1,27 @@ -----BEGIN RSA PRIVATE KEY----- -MIIJKAIBAAKCAgEAvYL093MM9CIffuyJb+ZXNeR7BIkphDIeJzjGa0mmpioH/8nQ -ohqNw0Gqa/1wyhRzCyquvAMcHzpc5hBUOHfbpqTzENvuBga9FRtt6aRrUAKSDx4A -9OktPwnh5gyFzRV3Csmz1//Sm4DZFwVm2yZCctBHLHT/FSQ+dENlh/PZ74BGCTRb 
-ihBCw94hmOmwRY3g05XuY2AOwoASrw9mr599YNuEIcZ/J/PwXWDv1NJMNKpqMtb3 -izowgFtKLNI5DDZSqxachxFjY+Qd1+lIEp/BBkGlWw6tez60MJRyWFOJ4odE5w9T -sLwZxZ3du6V46+vHhgOmXNgW1J4TRoaaVh2eOi6T8aSG2n0jh9xEtzfzGQH3FCiF -J7S5VhNsQkIBVU1veVnlbT/69WloCB7tBNiW9nhwhPql2KicUlO0uA3JJ5pnE0uv -Yc8h7PvdhHPu0JtGkBWdHcj6GD8FzIGaUwCLlxBoPxSgtX4kv3Xk1t+w8rwdo6aX -oSlt0SWN7rKO0aPb7OgycdTnVOVxKvUd5+6KetTezyvFsZp+01WBxGR5CE1b7bnG -WXdRTUzqziWjSVJxPhbjytTK33/o8+uukbuM0C9AqJ3Tmpl90EebbjSujE6hbPO8 -CmNuarKs+3Etu5B1vq+1DVmyWN/1EIWj2TO/MwmWtiC9ZwRVI7BbxAOK7WMCAwEA -AQKCAgEArwqno2uEGnbuKnjmVRInmWKpcb4TN8Rm74lUVEKaB76o1s0cxK3MJP6h -H8/e/vg2bqkE7indLsbkiaepcuLaYijXTcomJzDQMw+7zOOOLz/Aku/+qDg8D47c -NXV5nLzn0HIPiEIF0JYJbmcR4veKxqu0Ic8K0QdCHHcn75P/x2Tuy4+twW9Vi76/ -v5KRuxzZ/fTtVKKj32kWWNXb3fltgCoh+GR0jH2XlVh1DVkVBEwnfT/rM5ESvWwU -riOah7ohT1+6QlOAPwKzwfr6FCG000eNKPb8q+p12q0ylHzMzgxtSxJwFb0X/Nzc -snaboyWLjDAQ2I7LP6WmXizznvkKbE9PjW6UGYQ+2XApqp+Hn8tSC5I/gIDlBOOa -psJ4fkRjr8n5+CbHbGmQG736hZcZY/z10TtOQbxeeeuri6oDQ62D4Z07GpWCG2EG -sUakaytZnJkIN79PpfthPZwtStlG0KVs0i5wggH/iP2h0yAmvJ64ZRIqdvuE/aBn -sdfRRlYUqmFOJsVQgtUWGKGS4WIxrGaclzT1TNxCKdiAk0glXe3sDtvBni6qDW07 -iJzEXxrsLw6MiCDhHfDeae5JYeJXK0HlCfYHXgRmEnDFTGw8rBzwz3eXvPqZ5YNt -j+31uHSwQjgOgEgSrXeTmRfLZsytKqndhBB/yBFmzZNrswXGackCggEBAMN5RSdW -t+WWl8ghDGz/CN1oRjnk298/6L7ijluKGRgG+igwBEy+5m1EGPJT+Y5LEH4TiQJe -Oc2XjQuM7zABX7JWWk1cL8Zlv3kcmR0lg4BWs7wDkoU1HYRkMP57vubtxFzFOsNa -momivEniZ/eonHm3yv0VHeenH9j3mhJ3mVDIpkH+7uhn3++c0zYh96NkjfQi1/jF -P35eSAt7FgHDOt37fWXwtGeYFRN4P19ZUNiIvZwT6Q1gmegRO8BYoW6cSbLWe5Cp -abaULds46+mjM4zJhCZRFkdWHbzP4bZHocSmwGsqcpABJ6SASTVim02GGhBIt1nj -fkqa10X1c5Sqis0CggEBAPgxFKSHccfIJ6yht2HJjysRLN/IHlO9hDcpCWUrISN/ -hxu1uxfNGmUkd0H8zDO/O+QAJXLE8PPPB77pJniIJ8kK4swwsfufN6bNV9XJldjA -o4vXnYt9Mpuky9cugD8LocUgWQzzKY5Y875TC4s3ldzyKQVm0NO+Wz1U3gfjogEC -d7PhTk7Ba/ZjVGtL7HuZxlL+/TgZklMks2ulSTW2y8aqVJxaZXv0H0NX/+fpDHYw -iljr+iqbiqZvjrzySryb0XWMtzP9oyDEXTXrWnG+kOIZW3BZ9FLxT+Te7zZ2PUbK -vTkObsKxc8WVHIYgkt/OwWSwbYLre5nvFPvgEFbQuO8CggEAeZTlUXmbul63m5AK -xYS/w88G1x2lMK/0mT4bY4562zoDwJlVI1MdydqwVZGryDiiUnjeIC3xcBISdZu8 -bjR8jFUvp6xuPs2ska0bA0kBCQNkmc3zBY2rBVy4KKFZdRNwrm8yhK3HL1KcIKyF -FEK4yPBrfozy49JMecxP9aqUHu4eky/4828gl04JBUONXwC9VpuRj7dILdaAozt0 -zbXb2JSDQ7O60jCC83A4oprQMU6j+P9dVqe+Mtz9OD8ocb8eC/FiO/FTwm9aMl+u -RMzw1GHHI3oODGLg7j6y2oilcsZxKnblePJu8N+mKWFizY5aicRg3rUkKU00Ftx7 -fn2xBQKCAQB7w7Xgie5SStyF+KrC58kuF8WB3oBJEAOjoiIeQhCnbAvK5KfkqZHV -CAc0b8TAtUc/XldOUSk6222oZQmbJ4J3fac1Xb8TlAUjd9iqMnk3+nBT5vSYP5mC -Bf7kUjr/tWQ5MfVWQNfjNTZvHWhvRwvDfzq3h9rxDEbhYbXKx1fdGwboO51aJpgY -6NWLH/RQepFsh91sIUxXi8CxGF5Wm84oRn4k7esXkdgZNAPX+N4O/guvZhV9M81D -S/QpAsYEIcuky8P7+Cplx6YXokKa4AXNyglQEHuG9PD7V7SAOxw5dhZAIpNXIThz -OfVcaVf0pVzJQjWKCLW9QHz9UXG0aScfAoIBACdr3exVMUaMOtrAnf2NXj3hecgg -WsWRBOOaSW5wXGt1JNlfYS4zwViafIwy31DNuMg22rj5Mq0TYMtuNYto5RoLSXeB -uupUrENEBnt7JFrwI/NyWG0uYMM3G2MtGHGYooaT9+++wT96QxJZr5fwFYF1ddf6 -5tFeKtNt5VM0wWBHO1voUhQ0TCaooatJjMuAB0+WbvwniKxmdbqQDzY+6myBBUVo -gBJ0JxhxakLm1XGFHDtPCsAAHX/uZ4CvH2uyWqAlx6iwGXd0wwEGrbIRB/BundxR -oaJWswU4FIPAgOpy2LEJKnvzhcmVFtZWD5sFXA1/83QvpceLTFTD5uioBPU= +MIIEpAIBAAKCAQEA1IxPtq4Z3AXrd8V1Az078Sqtf76Ce4FCAVrhxnMLsaqy9L3S +4pM4u5YgDZR6GAXnjwQORm7XPqdEC+gWHkvoHl/VWOJWSfMsQ+VH7m48PbaRtzGA +KswJFmf7mYq8mFNSnHXzJ4FcyHG0+GQacporAZyB5bw7r0ozsyjgcFCqDnrnqlr7 +jCRSjsZfa/RHhmqatLW2XA6CmV3wpkpUnUU3IYR4pCQhgYkoeqALb8mtexvO5Jn/ +imp/wEdzoxsAdTzlGUEVUMcU8y1DtGDW+uFjOAicp6pe1zaQQB7reerJjIfE85R3 +e8hBf83i10FofjV9+zrw922n0Z6hxOJjKPsq5QIDAQABAoIBAQCLj3Xn5XllVx29 +jxG+Br8NI5C4iEb1AXJtoVcODwxmpEbNHLcTvsdJpNF3GT7x9y6MYYVeCfmbUgkE 
+KGgdjInlJ9fWfQdblyhBjJMmo4s6ml4jg4U8lKyC4dP6hXZALrXXtjrqfa6GjuLd +Fh2nkkMa08EXL/mgp4A662QzW0POLQIo1lMJc49FFPrVQneLedUdsJDowNz/HU/q +oD4/SsKw6inUh/A1MfSKvEhnJcVH4fiQhFQU5CdSzAHPmAYcoBeg6LjY+WScJAAs +Hu5kgunbCsB5vw9WbFDQzM1HYtW1CvJj1cjNp662b06D7VQugjtawhHNImkq1/65 +H2ZTglchAoGBAPu0OX3tEvtic4f8VLRv/TeI9NSC3EgRAtIDncDo+nwVjR54AXID +aePceImGUsDd5xfLuQTiYp50z0cEB5CGsWYbnjm0hliF8YXz/tpqi0V0Cr8fLLA8 +/jG3tajbZ8xu/3p1iEcIPevYT/44bjbOyDp5peQIHhr32LZ1gZfQDRt7AoGBANgt +AIid1rPIyEzhhznpWVjw/ZIrtgaP0HDgKaUUCsEqEDoOJEaFS7WG4G7m8/iS4f8v +XGgcoYf4TjfIwYtRQy2Bp9g4oOMiUbQKukF1DuFJpsw69y3hNNoZoUm7r2jpv3Q8 +/NY+O+BNaTVdmbOjNHmKo99MYGh1cPUPVGxuP1UfAoGBAOJ9fe5OUfJa2NLYv+/N +hfFfD8/aIRXIGN2Z224nNp5JVj7AhaxuXe5oCR7W+8gI5VWIP+ihPVSQj6O7gIMQ +cLkMyQfr5afqfzamJAGuNbw9ex4Xk0LS33klchWLuI9Aoiszb3lbdTyv3OtJJAO1 +dn8Hz7qtg0mJFDy65+4PjHvZAoGAXtKmmEZ75hKdYbPPiCSGT5At+g74Yjp1GP4K +5mE7Mm3L/lszqEdR5UdLbPobbB6pyTCyHOzqIeVWEfwagYzcpbposFxunhLwucO2 +3X2GUGXpJ056HALcFwsFB32vPJrDoy4ZTbSwuPvbuU/cWsKtAt9AcHNlGozhRm05 +//IAD8sCgYAUs6ibNtUqCFjekr10FBGFuA2ZQg+9bQYw3ti+S6uFMsxIDqYRC2bG +yvKhEYym/W7RwfzPWjGzuvFbZWzJnnb81WLfcI4DnrJe3h8THlnaBQhcsEObu84O +XS/sYeVo5c6l0kTNp0I8vXbn05bExZlsLAIICMTsm5bSQZI/iCRyEw== -----END RSA PRIVATE KEY----- From dabcfdc5a61f4fa1a78b2ff877dbd5389651503d Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Mon, 21 May 2018 16:33:38 -0700 Subject: [PATCH 094/276] Update for vndr update Signed-off-by: Derek McGowan --- .../x/crypto/otr/libotr_test_helper.c | 171 ++ vendor/golang.org/x/crypto/otr/otr.go | 1400 +++++++++++++++++ vendor/golang.org/x/crypto/otr/smp.go | 572 +++++++ 3 files changed, 2143 insertions(+) create mode 100644 vendor/golang.org/x/crypto/otr/libotr_test_helper.c create mode 100644 vendor/golang.org/x/crypto/otr/otr.go create mode 100644 vendor/golang.org/x/crypto/otr/smp.go diff --git a/vendor/golang.org/x/crypto/otr/libotr_test_helper.c b/vendor/golang.org/x/crypto/otr/libotr_test_helper.c new file mode 100644 index 00000000..6423eeda --- /dev/null +++ b/vendor/golang.org/x/crypto/otr/libotr_test_helper.c @@ -0,0 +1,171 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This code can be compiled and used to test the otr package against libotr. +// See otr_test.go. 
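+// It reads OTR messages on stdin, emits its replies on stdout and logs
+// progress to stderr.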
+
+// +build ignore
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <proto.h>
+#include <message.h>
+
+static int g_session_established = 0;
+
+OtrlPolicy policy(void *opdata, ConnContext *context) {
+  return OTRL_POLICY_ALWAYS;
+}
+
+int is_logged_in(void *opdata, const char *accountname, const char *protocol, const char *recipient) {
+  return 1;
+}
+
+void inject_message(void *opdata, const char *accountname, const char *protocol, const char *recipient, const char *message) {
+  printf("%s\n", message);
+  fflush(stdout);
+  fprintf(stderr, "libotr helper sent: %s\n", message);
+}
+
+void notify(void *opdata, OtrlNotifyLevel level, const char *accountname, const char *protocol, const char *username, const char *title, const char *primary, const char *secondary) {
+  fprintf(stderr, "NOTIFY: %s %s %s %s\n", username, title, primary, secondary);
+}
+
+int display_otr_message(void *opdata, const char *accountname, const char *protocol, const char *username, const char *msg) {
+  fprintf(stderr, "MESSAGE: %s %s\n", username, msg);
+  return 1;
+}
+
+void update_context_list(void *opdata) {
+}
+
+const char *protocol_name(void *opdata, const char *protocol) {
+  return "PROTOCOL";
+}
+
+void protocol_name_free(void *opdata, const char *protocol_name) {
+}
+
+void new_fingerprint(void *opdata, OtrlUserState us, const char *accountname, const char *protocol, const char *username, unsigned char fingerprint[20]) {
+  fprintf(stderr, "NEW FINGERPRINT\n");
+  g_session_established = 1;
+}
+
+void write_fingerprints(void *opdata) {
+}
+
+void gone_secure(void *opdata, ConnContext *context) {
+}
+
+void gone_insecure(void *opdata, ConnContext *context) {
+}
+
+void still_secure(void *opdata, ConnContext *context, int is_reply) {
+}
+
+void log_message(void *opdata, const char *message) {
+  fprintf(stderr, "MESSAGE: %s\n", message);
+}
+
+int max_message_size(void *opdata, ConnContext *context) {
+  return 99999;
+}
+
+const char *account_name(void *opdata, const char *account, const char *protocol) {
+  return "ACCOUNT";
+}
+
+void account_name_free(void *opdata, const char *account_name) {
+}
+
+OtrlMessageAppOps uiops = {
+    policy,
+    NULL,
+    is_logged_in,
+    inject_message,
+    notify,
+    display_otr_message,
+    update_context_list,
+    protocol_name,
+    protocol_name_free,
+    new_fingerprint,
+    write_fingerprints,
+    gone_secure,
+    gone_insecure,
+    still_secure,
+    log_message,
+    max_message_size,
+    account_name,
+    account_name_free,
+};
+
+static const char kPrivateKeyData[] = "(privkeys (account (name \"account\") (protocol proto) (private-key (dsa (p #00FC07ABCF0DC916AFF6E9AE47BEF60C7AB9B4D6B2469E436630E36F8A489BE812486A09F30B71224508654940A835301ACC525A4FF133FC152CC53DCC59D65C30A54F1993FE13FE63E5823D4C746DB21B90F9B9C00B49EC7404AB1D929BA7FBA12F2E45C6E0A651689750E8528AB8C031D3561FECEE72EBB4A090D450A9B7A857#) (q #00997BD266EF7B1F60A5C23F3A741F2AEFD07A2081#) (g #535E360E8A95EBA46A4F7DE50AD6E9B2A6DB785A66B64EB9F20338D2A3E8FB0E94725848F1AA6CC567CB83A1CC517EC806F2E92EAE71457E80B2210A189B91250779434B41FC8A8873F6DB94BEA7D177F5D59E7E114EE10A49CFD9CEF88AE43387023B672927BA74B04EB6BBB5E57597766A2F9CE3857D7ACE3E1E3BC1FC6F26#) (y #0AC8670AD767D7A8D9D14CC1AC6744CD7D76F993B77FFD9E39DF01E5A6536EF65E775FCEF2A983E2A19BD6415500F6979715D9FD1257E1FE2B6F5E1E74B333079E7C880D39868462A93454B41877BE62E5EF0A041C2EE9C9E76BD1E12AE25D9628DECB097025DD625EF49C3258A1A3C0FF501E3DC673B76D7BABF349009B6ECF#) (x #14D0345A3562C480A039E3C72764F72D79043216#)))))\n";
+
+int
+main() {
+  OTRL_INIT;
+
+  // We have to write the private key information to a file because the libotr
+  // 
API demands a filename to read from. + const char *tmpdir = "/tmp"; + if (getenv("TMP")) { + tmpdir = getenv("TMP"); + } + + char private_key_file[256]; + snprintf(private_key_file, sizeof(private_key_file), "%s/libotr_test_helper_privatekeys-XXXXXX", tmpdir); + int fd = mkstemp(private_key_file); + if (fd == -1) { + perror("creating temp file"); + } + write(fd, kPrivateKeyData, sizeof(kPrivateKeyData)-1); + close(fd); + + OtrlUserState userstate = otrl_userstate_create(); + otrl_privkey_read(userstate, private_key_file); + unlink(private_key_file); + + fprintf(stderr, "libotr helper started\n"); + + char buf[4096]; + + for (;;) { + char* message = fgets(buf, sizeof(buf), stdin); + if (strlen(message) == 0) { + break; + } + message[strlen(message) - 1] = 0; + fprintf(stderr, "libotr helper got: %s\n", message); + + char *newmessage = NULL; + OtrlTLV *tlvs; + int ignore_message = otrl_message_receiving(userstate, &uiops, NULL, "account", "proto", "peer", message, &newmessage, &tlvs, NULL, NULL); + if (tlvs) { + otrl_tlv_free(tlvs); + } + + if (newmessage != NULL) { + fprintf(stderr, "libotr got: %s\n", newmessage); + otrl_message_free(newmessage); + + gcry_error_t err; + char *newmessage = NULL; + + err = otrl_message_sending(userstate, &uiops, NULL, "account", "proto", "peer", "test message", NULL, &newmessage, NULL, NULL); + if (newmessage == NULL) { + fprintf(stderr, "libotr didn't encrypt message\n"); + return 1; + } + write(1, newmessage, strlen(newmessage)); + write(1, "\n", 1); + g_session_established = 0; + otrl_message_free(newmessage); + write(1, "?OTRv2?\n", 8); + } + } + + return 0; +} diff --git a/vendor/golang.org/x/crypto/otr/otr.go b/vendor/golang.org/x/crypto/otr/otr.go new file mode 100644 index 00000000..07ac080e --- /dev/null +++ b/vendor/golang.org/x/crypto/otr/otr.go @@ -0,0 +1,1400 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package otr implements the Off The Record protocol as specified in +// http://www.cypherpunks.ca/otr/Protocol-v2-3.1.0.html +package otr // import "golang.org/x/crypto/otr" + +import ( + "bytes" + "crypto/aes" + "crypto/cipher" + "crypto/dsa" + "crypto/hmac" + "crypto/rand" + "crypto/sha1" + "crypto/sha256" + "crypto/subtle" + "encoding/base64" + "encoding/hex" + "errors" + "hash" + "io" + "math/big" + "strconv" +) + +// SecurityChange describes a change in the security state of a Conversation. +type SecurityChange int + +const ( + NoChange SecurityChange = iota + // NewKeys indicates that a key exchange has completed. This occurs + // when a conversation first becomes encrypted, and when the keys are + // renegotiated within an encrypted conversation. + NewKeys + // SMPSecretNeeded indicates that the peer has started an + // authentication and that we need to supply a secret. Call SMPQuestion + // to get the optional, human readable challenge and then Authenticate + // to supply the matching secret. + SMPSecretNeeded + // SMPComplete indicates that an authentication completed. The identity + // of the peer has now been confirmed. + SMPComplete + // SMPFailed indicates that an authentication failed. + SMPFailed + // ConversationEnded indicates that the peer ended the secure + // conversation. + ConversationEnded +) + +// QueryMessage can be sent to a peer to start an OTR conversation. +var QueryMessage = "?OTRv2?" + +// ErrorPrefix can be used to make an OTR error by appending an error message +// to it. 
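+// For example, ErrorPrefix + " internal error" is a well-formed OTR error
+// message that can be sent to the peer in place of a normal reply.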
+var ErrorPrefix = "?OTR Error:" + +var ( + fragmentPartSeparator = []byte(",") + fragmentPrefix = []byte("?OTR,") + msgPrefix = []byte("?OTR:") + queryMarker = []byte("?OTR") +) + +// isQuery attempts to parse an OTR query from msg and returns the greatest +// common version, or 0 if msg is not an OTR query. +func isQuery(msg []byte) (greatestCommonVersion int) { + pos := bytes.Index(msg, queryMarker) + if pos == -1 { + return 0 + } + for i, c := range msg[pos+len(queryMarker):] { + if i == 0 { + if c == '?' { + // Indicates support for version 1, but we don't + // implement that. + continue + } + + if c != 'v' { + // Invalid message + return 0 + } + + continue + } + + if c == '?' { + // End of message + return + } + + if c == ' ' || c == '\t' { + // Probably an invalid message + return 0 + } + + if c == '2' { + greatestCommonVersion = 2 + } + } + + return 0 +} + +const ( + statePlaintext = iota + stateEncrypted + stateFinished +) + +const ( + authStateNone = iota + authStateAwaitingDHKey + authStateAwaitingRevealSig + authStateAwaitingSig +) + +const ( + msgTypeDHCommit = 2 + msgTypeData = 3 + msgTypeDHKey = 10 + msgTypeRevealSig = 17 + msgTypeSig = 18 +) + +const ( + // If the requested fragment size is less than this, it will be ignored. + minFragmentSize = 18 + // Messages are padded to a multiple of this number of bytes. + paddingGranularity = 256 + // The number of bytes in a Diffie-Hellman private value (320-bits). + dhPrivateBytes = 40 + // The number of bytes needed to represent an element of the DSA + // subgroup (160-bits). + dsaSubgroupBytes = 20 + // The number of bytes of the MAC that are sent on the wire (160-bits). + macPrefixBytes = 20 +) + +// These are the global, common group parameters for OTR. +var ( + p *big.Int // group prime + g *big.Int // group generator + q *big.Int // group order + pMinus2 *big.Int +) + +func init() { + p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF", 16) + q, _ = new(big.Int).SetString("7FFFFFFFFFFFFFFFE487ED5110B4611A62633145C06E0E68948127044533E63A0105DF531D89CD9128A5043CC71A026EF7CA8CD9E69D218D98158536F92F8A1BA7F09AB6B6A8E122F242DABB312F3F637A262174D31BF6B585FFAE5B7A035BF6F71C35FDAD44CFD2D74F9208BE258FF324943328F6722D9EE1003E5C50B1DF82CC6D241B0E2AE9CD348B1FD47E9267AFC1B2AE91EE51D6CB0E3179AB1042A95DCF6A9483B84B4B36B3861AA7255E4C0278BA36046511B993FFFFFFFFFFFFFFFF", 16) + g = new(big.Int).SetInt64(2) + pMinus2 = new(big.Int).Sub(p, g) +} + +// Conversation represents a relation with a peer. The zero value is a valid +// Conversation, although PrivateKey must be set. +// +// When communicating with a peer, all inbound messages should be passed to +// Conversation.Receive and all outbound messages to Conversation.Send. The +// Conversation will take care of maintaining the encryption state and +// negotiating encryption as needed. +type Conversation struct { + // PrivateKey contains the private key to use to sign key exchanges. + PrivateKey *PrivateKey + + // Rand can be set to override the entropy source. Otherwise, + // crypto/rand will be used. 
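+	// Reads from Rand must succeed; the package panics on a short read.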
+ Rand io.Reader + // If FragmentSize is set, all messages produced by Receive and Send + // will be fragmented into messages of, at most, this number of bytes. + FragmentSize int + + // Once Receive has returned NewKeys once, the following fields are + // valid. + SSID [8]byte + TheirPublicKey PublicKey + + state, authState int + + r [16]byte + x, y *big.Int + gx, gy *big.Int + gxBytes []byte + digest [sha256.Size]byte + + revealKeys, sigKeys akeKeys + + myKeyId uint32 + myCurrentDHPub *big.Int + myCurrentDHPriv *big.Int + myLastDHPub *big.Int + myLastDHPriv *big.Int + + theirKeyId uint32 + theirCurrentDHPub *big.Int + theirLastDHPub *big.Int + + keySlots [4]keySlot + + myCounter [8]byte + theirLastCtr [8]byte + oldMACs []byte + + k, n int // fragment state + frag []byte + + smp smpState +} + +// A keySlot contains key material for a specific (their keyid, my keyid) pair. +type keySlot struct { + // used is true if this slot is valid. If false, it's free for reuse. + used bool + theirKeyId uint32 + myKeyId uint32 + sendAESKey, recvAESKey []byte + sendMACKey, recvMACKey []byte + theirLastCtr [8]byte +} + +// akeKeys are generated during key exchange. There's one set for the reveal +// signature message and another for the signature message. In the protocol +// spec the latter are indicated with a prime mark. +type akeKeys struct { + c [16]byte + m1, m2 [32]byte +} + +func (c *Conversation) rand() io.Reader { + if c.Rand != nil { + return c.Rand + } + return rand.Reader +} + +func (c *Conversation) randMPI(buf []byte) *big.Int { + _, err := io.ReadFull(c.rand(), buf) + if err != nil { + panic("otr: short read from random source") + } + + return new(big.Int).SetBytes(buf) +} + +// tlv represents the type-length value from the protocol. +type tlv struct { + typ, length uint16 + data []byte +} + +const ( + tlvTypePadding = 0 + tlvTypeDisconnected = 1 + tlvTypeSMP1 = 2 + tlvTypeSMP2 = 3 + tlvTypeSMP3 = 4 + tlvTypeSMP4 = 5 + tlvTypeSMPAbort = 6 + tlvTypeSMP1WithQuestion = 7 +) + +// Receive handles a message from a peer. It returns a human readable message, +// an indicator of whether that message was encrypted, a hint about the +// encryption state and zero or more messages to send back to the peer. +// These messages do not need to be passed to Send before transmission. +func (c *Conversation) Receive(in []byte) (out []byte, encrypted bool, change SecurityChange, toSend [][]byte, err error) { + if bytes.HasPrefix(in, fragmentPrefix) { + in, err = c.processFragment(in) + if in == nil || err != nil { + return + } + } + + if bytes.HasPrefix(in, msgPrefix) && in[len(in)-1] == '.' 
{ + in = in[len(msgPrefix) : len(in)-1] + } else if version := isQuery(in); version > 0 { + c.authState = authStateAwaitingDHKey + c.myKeyId = 0 + toSend = c.encode(c.generateDHCommit()) + return + } else { + // plaintext message + out = in + return + } + + msg := make([]byte, base64.StdEncoding.DecodedLen(len(in))) + msgLen, err := base64.StdEncoding.Decode(msg, in) + if err != nil { + err = errors.New("otr: invalid base64 encoding in message") + return + } + msg = msg[:msgLen] + + // The first two bytes are the protocol version (2) + if len(msg) < 3 || msg[0] != 0 || msg[1] != 2 { + err = errors.New("otr: invalid OTR message") + return + } + + msgType := int(msg[2]) + msg = msg[3:] + + switch msgType { + case msgTypeDHCommit: + switch c.authState { + case authStateNone: + c.authState = authStateAwaitingRevealSig + if err = c.processDHCommit(msg); err != nil { + return + } + c.myKeyId = 0 + toSend = c.encode(c.generateDHKey()) + return + case authStateAwaitingDHKey: + // This is a 'SYN-crossing'. The greater digest wins. + var cmp int + if cmp, err = c.compareToDHCommit(msg); err != nil { + return + } + if cmp > 0 { + // We win. Retransmit DH commit. + toSend = c.encode(c.serializeDHCommit()) + return + } else { + // They win. We forget about our DH commit. + c.authState = authStateAwaitingRevealSig + if err = c.processDHCommit(msg); err != nil { + return + } + c.myKeyId = 0 + toSend = c.encode(c.generateDHKey()) + return + } + case authStateAwaitingRevealSig: + if err = c.processDHCommit(msg); err != nil { + return + } + toSend = c.encode(c.serializeDHKey()) + case authStateAwaitingSig: + if err = c.processDHCommit(msg); err != nil { + return + } + c.myKeyId = 0 + toSend = c.encode(c.generateDHKey()) + c.authState = authStateAwaitingRevealSig + default: + panic("bad state") + } + case msgTypeDHKey: + switch c.authState { + case authStateAwaitingDHKey: + var isSame bool + if isSame, err = c.processDHKey(msg); err != nil { + return + } + if isSame { + err = errors.New("otr: unexpected duplicate DH key") + return + } + toSend = c.encode(c.generateRevealSig()) + c.authState = authStateAwaitingSig + case authStateAwaitingSig: + var isSame bool + if isSame, err = c.processDHKey(msg); err != nil { + return + } + if isSame { + toSend = c.encode(c.serializeDHKey()) + } + } + case msgTypeRevealSig: + if c.authState != authStateAwaitingRevealSig { + return + } + if err = c.processRevealSig(msg); err != nil { + return + } + toSend = c.encode(c.generateSig()) + c.authState = authStateNone + c.state = stateEncrypted + change = NewKeys + case msgTypeSig: + if c.authState != authStateAwaitingSig { + return + } + if err = c.processSig(msg); err != nil { + return + } + c.authState = authStateNone + c.state = stateEncrypted + change = NewKeys + case msgTypeData: + if c.state != stateEncrypted { + err = errors.New("otr: encrypted message received without encrypted session established") + return + } + var tlvs []tlv + out, tlvs, err = c.processData(msg) + encrypted = true + + EachTLV: + for _, inTLV := range tlvs { + switch inTLV.typ { + case tlvTypeDisconnected: + change = ConversationEnded + c.state = stateFinished + break EachTLV + case tlvTypeSMP1, tlvTypeSMP2, tlvTypeSMP3, tlvTypeSMP4, tlvTypeSMPAbort, tlvTypeSMP1WithQuestion: + var reply tlv + var complete bool + reply, complete, err = c.processSMP(inTLV) + if err == smpSecretMissingError { + err = nil + change = SMPSecretNeeded + c.smp.saved = &inTLV + return + } else if err == smpFailureError { + err = nil + change = SMPFailed + return + } + if 
complete {
+				change = SMPComplete
+			}
+			if reply.typ != 0 {
+				toSend = c.encode(c.generateData(nil, &reply))
+			}
+			break EachTLV
+		default:
+			// skip unknown TLVs
+		}
+		}
+	default:
+		err = errors.New("otr: unknown message type " + strconv.Itoa(msgType))
+	}
+
+	return
+}
+
+// Send takes a human readable message from the local user, possibly encrypts
+// it and returns zero, one or more messages to send to the peer.
+func (c *Conversation) Send(msg []byte) ([][]byte, error) {
+	switch c.state {
+	case statePlaintext:
+		return [][]byte{msg}, nil
+	case stateEncrypted:
+		return c.encode(c.generateData(msg, nil)), nil
+	case stateFinished:
+		return nil, errors.New("otr: cannot send message because secure conversation has finished")
+	}
+
+	return nil, errors.New("otr: cannot send message in current state")
+}
+
+// SMPQuestion returns the human readable challenge question from the peer.
+// It's only valid after Receive has returned SMPSecretNeeded.
+func (c *Conversation) SMPQuestion() string {
+	return c.smp.question
+}
+
+// Authenticate begins an authentication with the peer. Authentication involves
+// an optional challenge message and a shared secret. The authentication
+// proceeds until either Receive returns SMPComplete, SMPSecretNeeded (which
+// indicates that a new authentication is happening and thus this one was
+// aborted) or SMPFailed.
+func (c *Conversation) Authenticate(question string, mutualSecret []byte) (toSend [][]byte, err error) {
+	if c.state != stateEncrypted {
+		err = errors.New("otr: can't authenticate a peer without a secure conversation established")
+		return
+	}
+
+	if c.smp.saved != nil {
+		c.calcSMPSecret(mutualSecret, false /* they started it */)
+
+		var out tlv
+		var complete bool
+		out, complete, err = c.processSMP(*c.smp.saved)
+		if complete {
+			panic("SMP completed on the first message")
+		}
+		c.smp.saved = nil
+		if out.typ != 0 {
+			toSend = c.encode(c.generateData(nil, &out))
+		}
+		return
+	}
+
+	c.calcSMPSecret(mutualSecret, true /* we started it */)
+	outs := c.startSMP(question)
+	for _, out := range outs {
+		toSend = append(toSend, c.encode(c.generateData(nil, &out))...)
+	}
+	return
+}
+
+// End ends a secure conversation by generating a termination message for
+// the peer and switches to unencrypted communication.
+func (c *Conversation) End() (toSend [][]byte) {
+	switch c.state {
+	case statePlaintext:
+		return nil
+	case stateEncrypted:
+		c.state = statePlaintext
+		return c.encode(c.generateData(nil, &tlv{typ: tlvTypeDisconnected}))
+	case stateFinished:
+		c.state = statePlaintext
+		return nil
+	}
+	panic("unreachable")
+}
+
+// IsEncrypted returns true if a message passed to Send would be encrypted
+// before transmission. This result remains valid until the next call to
+// Receive or End, which may change the state of the Conversation.
+func (c *Conversation) IsEncrypted() bool {
+	return c.state == stateEncrypted
+}
+
+var fragmentError = errors.New("otr: invalid OTR fragment")
+
+// processFragment processes a fragmented OTR message and possibly returns a
+// complete message. Fragmented messages look like "?OTR,k,n,msg," where k is
+// the fragment number (starting from 1), n is the number of fragments in this
+// message and msg is a substring of the base64 encoded message. 
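+// For example, a message split into three parts arrives as "?OTR,1,3,...,",
+// "?OTR,2,3,...," and "?OTR,3,3,...," and is reassembled once the final
+// fragment is seen.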
+func (c *Conversation) processFragment(in []byte) (out []byte, err error) { + in = in[len(fragmentPrefix):] // remove "?OTR," + parts := bytes.Split(in, fragmentPartSeparator) + if len(parts) != 4 || len(parts[3]) != 0 { + return nil, fragmentError + } + + k, err := strconv.Atoi(string(parts[0])) + if err != nil { + return nil, fragmentError + } + + n, err := strconv.Atoi(string(parts[1])) + if err != nil { + return nil, fragmentError + } + + if k < 1 || n < 1 || k > n { + return nil, fragmentError + } + + if k == 1 { + c.frag = append(c.frag[:0], parts[2]...) + c.k, c.n = k, n + } else if n == c.n && k == c.k+1 { + c.frag = append(c.frag, parts[2]...) + c.k++ + } else { + c.frag = c.frag[:0] + c.n, c.k = 0, 0 + } + + if c.n > 0 && c.k == c.n { + c.n, c.k = 0, 0 + return c.frag, nil + } + + return nil, nil +} + +func (c *Conversation) generateDHCommit() []byte { + _, err := io.ReadFull(c.rand(), c.r[:]) + if err != nil { + panic("otr: short read from random source") + } + + var xBytes [dhPrivateBytes]byte + c.x = c.randMPI(xBytes[:]) + c.gx = new(big.Int).Exp(g, c.x, p) + c.gy = nil + c.gxBytes = appendMPI(nil, c.gx) + + h := sha256.New() + h.Write(c.gxBytes) + h.Sum(c.digest[:0]) + + aesCipher, err := aes.NewCipher(c.r[:]) + if err != nil { + panic(err.Error()) + } + + var iv [aes.BlockSize]byte + ctr := cipher.NewCTR(aesCipher, iv[:]) + ctr.XORKeyStream(c.gxBytes, c.gxBytes) + + return c.serializeDHCommit() +} + +func (c *Conversation) serializeDHCommit() []byte { + var ret []byte + ret = appendU16(ret, 2) // protocol version + ret = append(ret, msgTypeDHCommit) + ret = appendData(ret, c.gxBytes) + ret = appendData(ret, c.digest[:]) + return ret +} + +func (c *Conversation) processDHCommit(in []byte) error { + var ok1, ok2 bool + c.gxBytes, in, ok1 = getData(in) + digest, in, ok2 := getData(in) + if !ok1 || !ok2 || len(in) > 0 { + return errors.New("otr: corrupt DH commit message") + } + copy(c.digest[:], digest) + return nil +} + +func (c *Conversation) compareToDHCommit(in []byte) (int, error) { + _, in, ok1 := getData(in) + digest, in, ok2 := getData(in) + if !ok1 || !ok2 || len(in) > 0 { + return 0, errors.New("otr: corrupt DH commit message") + } + return bytes.Compare(c.digest[:], digest), nil +} + +func (c *Conversation) generateDHKey() []byte { + var yBytes [dhPrivateBytes]byte + c.y = c.randMPI(yBytes[:]) + c.gy = new(big.Int).Exp(g, c.y, p) + return c.serializeDHKey() +} + +func (c *Conversation) serializeDHKey() []byte { + var ret []byte + ret = appendU16(ret, 2) // protocol version + ret = append(ret, msgTypeDHKey) + ret = appendMPI(ret, c.gy) + return ret +} + +func (c *Conversation) processDHKey(in []byte) (isSame bool, err error) { + gy, in, ok := getMPI(in) + if !ok { + err = errors.New("otr: corrupt DH key message") + return + } + if gy.Cmp(g) < 0 || gy.Cmp(pMinus2) > 0 { + err = errors.New("otr: DH value out of range") + return + } + if c.gy != nil { + isSame = c.gy.Cmp(gy) == 0 + return + } + c.gy = gy + return +} + +func (c *Conversation) generateEncryptedSignature(keys *akeKeys, xFirst bool) ([]byte, []byte) { + var xb []byte + xb = c.PrivateKey.PublicKey.Serialize(xb) + + var verifyData []byte + if xFirst { + verifyData = appendMPI(verifyData, c.gx) + verifyData = appendMPI(verifyData, c.gy) + } else { + verifyData = appendMPI(verifyData, c.gy) + verifyData = appendMPI(verifyData, c.gx) + } + verifyData = append(verifyData, xb...) 
+ verifyData = appendU32(verifyData, c.myKeyId) + + mac := hmac.New(sha256.New, keys.m1[:]) + mac.Write(verifyData) + mb := mac.Sum(nil) + + xb = appendU32(xb, c.myKeyId) + xb = append(xb, c.PrivateKey.Sign(c.rand(), mb)...) + + aesCipher, err := aes.NewCipher(keys.c[:]) + if err != nil { + panic(err.Error()) + } + var iv [aes.BlockSize]byte + ctr := cipher.NewCTR(aesCipher, iv[:]) + ctr.XORKeyStream(xb, xb) + + mac = hmac.New(sha256.New, keys.m2[:]) + encryptedSig := appendData(nil, xb) + mac.Write(encryptedSig) + + return encryptedSig, mac.Sum(nil) +} + +func (c *Conversation) generateRevealSig() []byte { + s := new(big.Int).Exp(c.gy, c.x, p) + c.calcAKEKeys(s) + c.myKeyId++ + + encryptedSig, mac := c.generateEncryptedSignature(&c.revealKeys, true /* gx comes first */) + + c.myCurrentDHPub = c.gx + c.myCurrentDHPriv = c.x + c.rotateDHKeys() + incCounter(&c.myCounter) + + var ret []byte + ret = appendU16(ret, 2) + ret = append(ret, msgTypeRevealSig) + ret = appendData(ret, c.r[:]) + ret = append(ret, encryptedSig...) + ret = append(ret, mac[:20]...) + return ret +} + +func (c *Conversation) processEncryptedSig(encryptedSig, theirMAC []byte, keys *akeKeys, xFirst bool) error { + mac := hmac.New(sha256.New, keys.m2[:]) + mac.Write(appendData(nil, encryptedSig)) + myMAC := mac.Sum(nil)[:20] + + if len(myMAC) != len(theirMAC) || subtle.ConstantTimeCompare(myMAC, theirMAC) == 0 { + return errors.New("bad signature MAC in encrypted signature") + } + + aesCipher, err := aes.NewCipher(keys.c[:]) + if err != nil { + panic(err.Error()) + } + var iv [aes.BlockSize]byte + ctr := cipher.NewCTR(aesCipher, iv[:]) + ctr.XORKeyStream(encryptedSig, encryptedSig) + + sig := encryptedSig + sig, ok1 := c.TheirPublicKey.Parse(sig) + keyId, sig, ok2 := getU32(sig) + if !ok1 || !ok2 { + return errors.New("otr: corrupt encrypted signature") + } + + var verifyData []byte + if xFirst { + verifyData = appendMPI(verifyData, c.gx) + verifyData = appendMPI(verifyData, c.gy) + } else { + verifyData = appendMPI(verifyData, c.gy) + verifyData = appendMPI(verifyData, c.gx) + } + verifyData = c.TheirPublicKey.Serialize(verifyData) + verifyData = appendU32(verifyData, keyId) + + mac = hmac.New(sha256.New, keys.m1[:]) + mac.Write(verifyData) + mb := mac.Sum(nil) + + sig, ok1 = c.TheirPublicKey.Verify(mb, sig) + if !ok1 { + return errors.New("bad signature in encrypted signature") + } + if len(sig) > 0 { + return errors.New("corrupt encrypted signature") + } + + c.theirKeyId = keyId + zero(c.theirLastCtr[:]) + return nil +} + +func (c *Conversation) processRevealSig(in []byte) error { + r, in, ok1 := getData(in) + encryptedSig, in, ok2 := getData(in) + theirMAC := in + if !ok1 || !ok2 || len(theirMAC) != 20 { + return errors.New("otr: corrupt reveal signature message") + } + + aesCipher, err := aes.NewCipher(r) + if err != nil { + return errors.New("otr: cannot create AES cipher from reveal signature message: " + err.Error()) + } + var iv [aes.BlockSize]byte + ctr := cipher.NewCTR(aesCipher, iv[:]) + ctr.XORKeyStream(c.gxBytes, c.gxBytes) + h := sha256.New() + h.Write(c.gxBytes) + digest := h.Sum(nil) + if len(digest) != len(c.digest) || subtle.ConstantTimeCompare(digest, c.digest[:]) == 0 { + return errors.New("otr: bad commit MAC in reveal signature message") + } + var rest []byte + c.gx, rest, ok1 = getMPI(c.gxBytes) + if !ok1 || len(rest) > 0 { + return errors.New("otr: gx corrupt after decryption") + } + if c.gx.Cmp(g) < 0 || c.gx.Cmp(pMinus2) > 0 { + return errors.New("otr: DH value out of range") + } + s := 
new(big.Int).Exp(c.gx, c.y, p) + c.calcAKEKeys(s) + + if err := c.processEncryptedSig(encryptedSig, theirMAC, &c.revealKeys, true /* gx comes first */); err != nil { + return errors.New("otr: in reveal signature message: " + err.Error()) + } + + c.theirCurrentDHPub = c.gx + c.theirLastDHPub = nil + + return nil +} + +func (c *Conversation) generateSig() []byte { + c.myKeyId++ + + encryptedSig, mac := c.generateEncryptedSignature(&c.sigKeys, false /* gy comes first */) + + c.myCurrentDHPub = c.gy + c.myCurrentDHPriv = c.y + c.rotateDHKeys() + incCounter(&c.myCounter) + + var ret []byte + ret = appendU16(ret, 2) + ret = append(ret, msgTypeSig) + ret = append(ret, encryptedSig...) + ret = append(ret, mac[:macPrefixBytes]...) + return ret +} + +func (c *Conversation) processSig(in []byte) error { + encryptedSig, in, ok1 := getData(in) + theirMAC := in + if !ok1 || len(theirMAC) != macPrefixBytes { + return errors.New("otr: corrupt signature message") + } + + if err := c.processEncryptedSig(encryptedSig, theirMAC, &c.sigKeys, false /* gy comes first */); err != nil { + return errors.New("otr: in signature message: " + err.Error()) + } + + c.theirCurrentDHPub = c.gy + c.theirLastDHPub = nil + + return nil +} + +func (c *Conversation) rotateDHKeys() { + // evict slots using our retired key id + for i := range c.keySlots { + slot := &c.keySlots[i] + if slot.used && slot.myKeyId == c.myKeyId-1 { + slot.used = false + c.oldMACs = append(c.oldMACs, slot.sendMACKey...) + c.oldMACs = append(c.oldMACs, slot.recvMACKey...) + } + } + + c.myLastDHPriv = c.myCurrentDHPriv + c.myLastDHPub = c.myCurrentDHPub + + var xBytes [dhPrivateBytes]byte + c.myCurrentDHPriv = c.randMPI(xBytes[:]) + c.myCurrentDHPub = new(big.Int).Exp(g, c.myCurrentDHPriv, p) + c.myKeyId++ +} + +func (c *Conversation) processData(in []byte) (out []byte, tlvs []tlv, err error) { + origIn := in + flags, in, ok1 := getU8(in) + theirKeyId, in, ok2 := getU32(in) + myKeyId, in, ok3 := getU32(in) + y, in, ok4 := getMPI(in) + counter, in, ok5 := getNBytes(in, 8) + encrypted, in, ok6 := getData(in) + macedData := origIn[:len(origIn)-len(in)] + theirMAC, in, ok7 := getNBytes(in, macPrefixBytes) + _, in, ok8 := getData(in) + if !ok1 || !ok2 || !ok3 || !ok4 || !ok5 || !ok6 || !ok7 || !ok8 || len(in) > 0 { + err = errors.New("otr: corrupt data message") + return + } + + ignoreErrors := flags&1 != 0 + + slot, err := c.calcDataKeys(myKeyId, theirKeyId) + if err != nil { + if ignoreErrors { + err = nil + } + return + } + + mac := hmac.New(sha1.New, slot.recvMACKey) + mac.Write([]byte{0, 2, 3}) + mac.Write(macedData) + myMAC := mac.Sum(nil) + if len(myMAC) != len(theirMAC) || subtle.ConstantTimeCompare(myMAC, theirMAC) == 0 { + if !ignoreErrors { + err = errors.New("otr: bad MAC on data message") + } + return + } + + if bytes.Compare(counter, slot.theirLastCtr[:]) <= 0 { + err = errors.New("otr: counter regressed") + return + } + copy(slot.theirLastCtr[:], counter) + + var iv [aes.BlockSize]byte + copy(iv[:], counter) + aesCipher, err := aes.NewCipher(slot.recvAESKey) + if err != nil { + panic(err.Error()) + } + ctr := cipher.NewCTR(aesCipher, iv[:]) + ctr.XORKeyStream(encrypted, encrypted) + decrypted := encrypted + + if myKeyId == c.myKeyId { + c.rotateDHKeys() + } + if theirKeyId == c.theirKeyId { + // evict slots using their retired key id + for i := range c.keySlots { + slot := &c.keySlots[i] + if slot.used && slot.theirKeyId == theirKeyId-1 { + slot.used = false + c.oldMACs = append(c.oldMACs, slot.sendMACKey...) 
+				c.oldMACs = append(c.oldMACs, slot.recvMACKey...)
+			}
+		}
+
+		c.theirLastDHPub = c.theirCurrentDHPub
+		c.theirKeyId++
+		c.theirCurrentDHPub = y
+	}
+
+	if nulPos := bytes.IndexByte(decrypted, 0); nulPos >= 0 {
+		out = decrypted[:nulPos]
+		tlvData := decrypted[nulPos+1:]
+		for len(tlvData) > 0 {
+			var t tlv
+			var ok1, ok2, ok3 bool
+
+			t.typ, tlvData, ok1 = getU16(tlvData)
+			t.length, tlvData, ok2 = getU16(tlvData)
+			t.data, tlvData, ok3 = getNBytes(tlvData, int(t.length))
+			if !ok1 || !ok2 || !ok3 {
+				err = errors.New("otr: corrupt tlv data")
+				return
+			}
+			tlvs = append(tlvs, t)
+		}
+	} else {
+		out = decrypted
+	}
+
+	return
+}
+
+func (c *Conversation) generateData(msg []byte, extra *tlv) []byte {
+	slot, err := c.calcDataKeys(c.myKeyId-1, c.theirKeyId)
+	if err != nil {
+		panic("otr: failed to generate sending keys: " + err.Error())
+	}
+
+	var plaintext []byte
+	plaintext = append(plaintext, msg...)
+	plaintext = append(plaintext, 0)
+
+	padding := paddingGranularity - ((len(plaintext) + 4) % paddingGranularity)
+	plaintext = appendU16(plaintext, tlvTypePadding)
+	plaintext = appendU16(plaintext, uint16(padding))
+	for i := 0; i < padding; i++ {
+		plaintext = append(plaintext, 0)
+	}
+
+	if extra != nil {
+		plaintext = appendU16(plaintext, extra.typ)
+		plaintext = appendU16(plaintext, uint16(len(extra.data)))
+		plaintext = append(plaintext, extra.data...)
+	}
+
+	encrypted := make([]byte, len(plaintext))
+
+	var iv [aes.BlockSize]byte
+	copy(iv[:], c.myCounter[:])
+	aesCipher, err := aes.NewCipher(slot.sendAESKey)
+	if err != nil {
+		panic(err.Error())
+	}
+	ctr := cipher.NewCTR(aesCipher, iv[:])
+	ctr.XORKeyStream(encrypted, plaintext)
+
+	var ret []byte
+	ret = appendU16(ret, 2)
+	ret = append(ret, msgTypeData)
+	ret = append(ret, 0 /* flags */)
+	ret = appendU32(ret, c.myKeyId-1)
+	ret = appendU32(ret, c.theirKeyId)
+	ret = appendMPI(ret, c.myCurrentDHPub)
+	ret = append(ret, c.myCounter[:]...)
+	ret = appendData(ret, encrypted)
+
+	mac := hmac.New(sha1.New, slot.sendMACKey)
+	mac.Write(ret)
+	ret = append(ret, mac.Sum(nil)[:macPrefixBytes]...)
+	ret = appendData(ret, c.oldMACs)
+	c.oldMACs = nil
+	incCounter(&c.myCounter)
+
+	return ret
+}
+
+func incCounter(counter *[8]byte) {
+	for i := 7; i >= 0; i-- {
+		counter[i]++
+		if counter[i] > 0 {
+			break
+		}
+	}
+}
+
+// calcDataKeys computes the keys used to encrypt a data message given the key
+// IDs.
+func (c *Conversation) calcDataKeys(myKeyId, theirKeyId uint32) (slot *keySlot, err error) {
+	// Check for a cache hit.
+	for i := range c.keySlots {
+		slot = &c.keySlots[i]
+		if slot.used && slot.theirKeyId == theirKeyId && slot.myKeyId == myKeyId {
+			return
+		}
+	}
+
+	// Find an empty slot to write into. 
+ slot = nil + for i := range c.keySlots { + if !c.keySlots[i].used { + slot = &c.keySlots[i] + break + } + } + if slot == nil { + err = errors.New("otr: internal error: no key slots") + return + } + + var myPriv, myPub, theirPub *big.Int + + if myKeyId == c.myKeyId { + myPriv = c.myCurrentDHPriv + myPub = c.myCurrentDHPub + } else if myKeyId == c.myKeyId-1 { + myPriv = c.myLastDHPriv + myPub = c.myLastDHPub + } else { + err = errors.New("otr: peer requested keyid " + strconv.FormatUint(uint64(myKeyId), 10) + " when I'm on " + strconv.FormatUint(uint64(c.myKeyId), 10)) + return + } + + if theirKeyId == c.theirKeyId { + theirPub = c.theirCurrentDHPub + } else if theirKeyId == c.theirKeyId-1 && c.theirLastDHPub != nil { + theirPub = c.theirLastDHPub + } else { + err = errors.New("otr: peer requested keyid " + strconv.FormatUint(uint64(myKeyId), 10) + " when they're on " + strconv.FormatUint(uint64(c.myKeyId), 10)) + return + } + + var sendPrefixByte, recvPrefixByte [1]byte + + if myPub.Cmp(theirPub) > 0 { + // we're the high end + sendPrefixByte[0], recvPrefixByte[0] = 1, 2 + } else { + // we're the low end + sendPrefixByte[0], recvPrefixByte[0] = 2, 1 + } + + s := new(big.Int).Exp(theirPub, myPriv, p) + sBytes := appendMPI(nil, s) + + h := sha1.New() + h.Write(sendPrefixByte[:]) + h.Write(sBytes) + slot.sendAESKey = h.Sum(slot.sendAESKey[:0])[:16] + + h.Reset() + h.Write(slot.sendAESKey) + slot.sendMACKey = h.Sum(slot.sendMACKey[:0]) + + h.Reset() + h.Write(recvPrefixByte[:]) + h.Write(sBytes) + slot.recvAESKey = h.Sum(slot.recvAESKey[:0])[:16] + + h.Reset() + h.Write(slot.recvAESKey) + slot.recvMACKey = h.Sum(slot.recvMACKey[:0]) + + zero(slot.theirLastCtr[:]) + return +} + +func (c *Conversation) calcAKEKeys(s *big.Int) { + mpi := appendMPI(nil, s) + h := sha256.New() + + var cBytes [32]byte + hashWithPrefix(c.SSID[:], 0, mpi, h) + + hashWithPrefix(cBytes[:], 1, mpi, h) + copy(c.revealKeys.c[:], cBytes[:16]) + copy(c.sigKeys.c[:], cBytes[16:]) + + hashWithPrefix(c.revealKeys.m1[:], 2, mpi, h) + hashWithPrefix(c.revealKeys.m2[:], 3, mpi, h) + hashWithPrefix(c.sigKeys.m1[:], 4, mpi, h) + hashWithPrefix(c.sigKeys.m2[:], 5, mpi, h) +} + +func hashWithPrefix(out []byte, prefix byte, in []byte, h hash.Hash) { + h.Reset() + var p [1]byte + p[0] = prefix + h.Write(p[:]) + h.Write(in) + if len(out) == h.Size() { + h.Sum(out[:0]) + } else { + digest := h.Sum(nil) + copy(out, digest) + } +} + +func (c *Conversation) encode(msg []byte) [][]byte { + b64 := make([]byte, base64.StdEncoding.EncodedLen(len(msg))+len(msgPrefix)+1) + base64.StdEncoding.Encode(b64[len(msgPrefix):], msg) + copy(b64, msgPrefix) + b64[len(b64)-1] = '.' + + if c.FragmentSize < minFragmentSize || len(b64) <= c.FragmentSize { + // We can encode this in a single fragment. + return [][]byte{b64} + } + + // We have to fragment this message. + var ret [][]byte + bytesPerFragment := c.FragmentSize - minFragmentSize + numFragments := (len(b64) + bytesPerFragment) / bytesPerFragment + + for i := 0; i < numFragments; i++ { + frag := []byte("?OTR," + strconv.Itoa(i+1) + "," + strconv.Itoa(numFragments) + ",") + todo := bytesPerFragment + if todo > len(b64) { + todo = len(b64) + } + frag = append(frag, b64[:todo]...) 
+ b64 = b64[todo:] + frag = append(frag, ',') + ret = append(ret, frag) + } + + return ret +} + +type PublicKey struct { + dsa.PublicKey +} + +func (pk *PublicKey) Parse(in []byte) ([]byte, bool) { + var ok bool + var pubKeyType uint16 + + if pubKeyType, in, ok = getU16(in); !ok || pubKeyType != 0 { + return nil, false + } + if pk.P, in, ok = getMPI(in); !ok { + return nil, false + } + if pk.Q, in, ok = getMPI(in); !ok { + return nil, false + } + if pk.G, in, ok = getMPI(in); !ok { + return nil, false + } + if pk.Y, in, ok = getMPI(in); !ok { + return nil, false + } + + return in, true +} + +func (pk *PublicKey) Serialize(in []byte) []byte { + in = appendU16(in, 0) + in = appendMPI(in, pk.P) + in = appendMPI(in, pk.Q) + in = appendMPI(in, pk.G) + in = appendMPI(in, pk.Y) + return in +} + +// Fingerprint returns the 20-byte, binary fingerprint of the PublicKey. +func (pk *PublicKey) Fingerprint() []byte { + b := pk.Serialize(nil) + h := sha1.New() + h.Write(b[2:]) + return h.Sum(nil) +} + +func (pk *PublicKey) Verify(hashed, sig []byte) ([]byte, bool) { + if len(sig) != 2*dsaSubgroupBytes { + return nil, false + } + r := new(big.Int).SetBytes(sig[:dsaSubgroupBytes]) + s := new(big.Int).SetBytes(sig[dsaSubgroupBytes:]) + ok := dsa.Verify(&pk.PublicKey, hashed, r, s) + return sig[dsaSubgroupBytes*2:], ok +} + +type PrivateKey struct { + PublicKey + dsa.PrivateKey +} + +func (priv *PrivateKey) Sign(rand io.Reader, hashed []byte) []byte { + r, s, err := dsa.Sign(rand, &priv.PrivateKey, hashed) + if err != nil { + panic(err.Error()) + } + rBytes := r.Bytes() + sBytes := s.Bytes() + if len(rBytes) > dsaSubgroupBytes || len(sBytes) > dsaSubgroupBytes { + panic("DSA signature too large") + } + + out := make([]byte, 2*dsaSubgroupBytes) + copy(out[dsaSubgroupBytes-len(rBytes):], rBytes) + copy(out[len(out)-len(sBytes):], sBytes) + return out +} + +func (priv *PrivateKey) Serialize(in []byte) []byte { + in = priv.PublicKey.Serialize(in) + in = appendMPI(in, priv.PrivateKey.X) + return in +} + +func (priv *PrivateKey) Parse(in []byte) ([]byte, bool) { + in, ok := priv.PublicKey.Parse(in) + if !ok { + return in, ok + } + priv.PrivateKey.PublicKey = priv.PublicKey.PublicKey + priv.PrivateKey.X, in, ok = getMPI(in) + return in, ok +} + +func (priv *PrivateKey) Generate(rand io.Reader) { + if err := dsa.GenerateParameters(&priv.PrivateKey.PublicKey.Parameters, rand, dsa.L1024N160); err != nil { + panic(err.Error()) + } + if err := dsa.GenerateKey(&priv.PrivateKey, rand); err != nil { + panic(err.Error()) + } + priv.PublicKey.PublicKey = priv.PrivateKey.PublicKey +} + +func notHex(r rune) bool { + if r >= '0' && r <= '9' || + r >= 'a' && r <= 'f' || + r >= 'A' && r <= 'F' { + return false + } + + return true +} + +// Import parses the contents of a libotr private key file. 
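+// The file is expected to contain the five DSA values p, q, g, y and x, in
+// that order, each as a hex-encoded MPI introduced by " #". Import reports
+// success only if the parsed key is self-consistent, i.e. g^x mod p == y.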
+func (priv *PrivateKey) Import(in []byte) bool { + mpiStart := []byte(" #") + + mpis := make([]*big.Int, 5) + + for i := 0; i < len(mpis); i++ { + start := bytes.Index(in, mpiStart) + if start == -1 { + return false + } + in = in[start+len(mpiStart):] + end := bytes.IndexFunc(in, notHex) + if end == -1 { + return false + } + hexBytes := in[:end] + in = in[end:] + + if len(hexBytes)&1 != 0 { + return false + } + + mpiBytes := make([]byte, len(hexBytes)/2) + if _, err := hex.Decode(mpiBytes, hexBytes); err != nil { + return false + } + + mpis[i] = new(big.Int).SetBytes(mpiBytes) + } + + priv.PrivateKey.P = mpis[0] + priv.PrivateKey.Q = mpis[1] + priv.PrivateKey.G = mpis[2] + priv.PrivateKey.Y = mpis[3] + priv.PrivateKey.X = mpis[4] + priv.PublicKey.PublicKey = priv.PrivateKey.PublicKey + + a := new(big.Int).Exp(priv.PrivateKey.G, priv.PrivateKey.X, priv.PrivateKey.P) + return a.Cmp(priv.PrivateKey.Y) == 0 +} + +func getU8(in []byte) (uint8, []byte, bool) { + if len(in) < 1 { + return 0, in, false + } + return in[0], in[1:], true +} + +func getU16(in []byte) (uint16, []byte, bool) { + if len(in) < 2 { + return 0, in, false + } + r := uint16(in[0])<<8 | uint16(in[1]) + return r, in[2:], true +} + +func getU32(in []byte) (uint32, []byte, bool) { + if len(in) < 4 { + return 0, in, false + } + r := uint32(in[0])<<24 | uint32(in[1])<<16 | uint32(in[2])<<8 | uint32(in[3]) + return r, in[4:], true +} + +func getMPI(in []byte) (*big.Int, []byte, bool) { + l, in, ok := getU32(in) + if !ok || uint32(len(in)) < l { + return nil, in, false + } + r := new(big.Int).SetBytes(in[:l]) + return r, in[l:], true +} + +func getData(in []byte) ([]byte, []byte, bool) { + l, in, ok := getU32(in) + if !ok || uint32(len(in)) < l { + return nil, in, false + } + return in[:l], in[l:], true +} + +func getNBytes(in []byte, n int) ([]byte, []byte, bool) { + if len(in) < n { + return nil, in, false + } + return in[:n], in[n:], true +} + +func appendU16(out []byte, v uint16) []byte { + out = append(out, byte(v>>8), byte(v)) + return out +} + +func appendU32(out []byte, v uint32) []byte { + out = append(out, byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) + return out +} + +func appendData(out, v []byte) []byte { + out = appendU32(out, uint32(len(v))) + out = append(out, v...) + return out +} + +func appendMPI(out []byte, v *big.Int) []byte { + vBytes := v.Bytes() + out = appendU32(out, uint32(len(vBytes))) + out = append(out, vBytes...) + return out +} + +func appendMPIs(out []byte, mpis ...*big.Int) []byte { + for _, mpi := range mpis { + out = appendMPI(out, mpi) + } + return out +} + +func zero(b []byte) { + for i := range b { + b[i] = 0 + } +} diff --git a/vendor/golang.org/x/crypto/otr/smp.go b/vendor/golang.org/x/crypto/otr/smp.go new file mode 100644 index 00000000..dc6de4ee --- /dev/null +++ b/vendor/golang.org/x/crypto/otr/smp.go @@ -0,0 +1,572 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements the Socialist Millionaires Protocol as described in +// http://www.cypherpunks.ca/otr/Protocol-v2-3.1.0.html. The protocol +// specification is required in order to understand this code and, where +// possible, the variable names in the code match up with the spec. 
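+//
+// The exchange consists of four TLV messages, SMP1 through SMP4 (SMP1 may
+// carry an optional human-readable question), plus an abort TLV that either
+// side can send to reset the state machine.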
+ +package otr + +import ( + "bytes" + "crypto/sha256" + "errors" + "hash" + "math/big" +) + +type smpFailure string + +func (s smpFailure) Error() string { + return string(s) +} + +var smpFailureError = smpFailure("otr: SMP protocol failed") +var smpSecretMissingError = smpFailure("otr: mutual secret needed") + +const smpVersion = 1 + +const ( + smpState1 = iota + smpState2 + smpState3 + smpState4 +) + +type smpState struct { + state int + a2, a3, b2, b3, pb, qb *big.Int + g2a, g3a *big.Int + g2, g3 *big.Int + g3b, papb, qaqb, ra *big.Int + saved *tlv + secret *big.Int + question string +} + +func (c *Conversation) startSMP(question string) (tlvs []tlv) { + if c.smp.state != smpState1 { + tlvs = append(tlvs, c.generateSMPAbort()) + } + tlvs = append(tlvs, c.generateSMP1(question)) + c.smp.question = "" + c.smp.state = smpState2 + return +} + +func (c *Conversation) resetSMP() { + c.smp.state = smpState1 + c.smp.secret = nil + c.smp.question = "" +} + +func (c *Conversation) processSMP(in tlv) (out tlv, complete bool, err error) { + data := in.data + + switch in.typ { + case tlvTypeSMPAbort: + if c.smp.state != smpState1 { + err = smpFailureError + } + c.resetSMP() + return + case tlvTypeSMP1WithQuestion: + // We preprocess this into a SMP1 message. + nulPos := bytes.IndexByte(data, 0) + if nulPos == -1 { + err = errors.New("otr: SMP message with question didn't contain a NUL byte") + return + } + c.smp.question = string(data[:nulPos]) + data = data[nulPos+1:] + } + + numMPIs, data, ok := getU32(data) + if !ok || numMPIs > 20 { + err = errors.New("otr: corrupt SMP message") + return + } + + mpis := make([]*big.Int, numMPIs) + for i := range mpis { + var ok bool + mpis[i], data, ok = getMPI(data) + if !ok { + err = errors.New("otr: corrupt SMP message") + return + } + } + + switch in.typ { + case tlvTypeSMP1, tlvTypeSMP1WithQuestion: + if c.smp.state != smpState1 { + c.resetSMP() + out = c.generateSMPAbort() + return + } + if c.smp.secret == nil { + err = smpSecretMissingError + return + } + if err = c.processSMP1(mpis); err != nil { + return + } + c.smp.state = smpState3 + out = c.generateSMP2() + case tlvTypeSMP2: + if c.smp.state != smpState2 { + c.resetSMP() + out = c.generateSMPAbort() + return + } + if out, err = c.processSMP2(mpis); err != nil { + out = c.generateSMPAbort() + return + } + c.smp.state = smpState4 + case tlvTypeSMP3: + if c.smp.state != smpState3 { + c.resetSMP() + out = c.generateSMPAbort() + return + } + if out, err = c.processSMP3(mpis); err != nil { + return + } + c.smp.state = smpState1 + c.smp.secret = nil + complete = true + case tlvTypeSMP4: + if c.smp.state != smpState4 { + c.resetSMP() + out = c.generateSMPAbort() + return + } + if err = c.processSMP4(mpis); err != nil { + out = c.generateSMPAbort() + return + } + c.smp.state = smpState1 + c.smp.secret = nil + complete = true + default: + panic("unknown SMP message") + } + + return +} + +func (c *Conversation) calcSMPSecret(mutualSecret []byte, weStarted bool) { + h := sha256.New() + h.Write([]byte{smpVersion}) + if weStarted { + h.Write(c.PrivateKey.PublicKey.Fingerprint()) + h.Write(c.TheirPublicKey.Fingerprint()) + } else { + h.Write(c.TheirPublicKey.Fingerprint()) + h.Write(c.PrivateKey.PublicKey.Fingerprint()) + } + h.Write(c.SSID[:]) + h.Write(mutualSecret) + c.smp.secret = new(big.Int).SetBytes(h.Sum(nil)) +} + +func (c *Conversation) generateSMP1(question string) tlv { + var randBuf [16]byte + c.smp.a2 = c.randMPI(randBuf[:]) + c.smp.a3 = c.randMPI(randBuf[:]) + g2a := new(big.Int).Exp(g, c.smp.a2, p) 
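+ // g2a and g3a are the exponentials g^a2 and g^a3 published in SMP1, each
+ // sent alongside a zero-knowledge proof (c2/d2 and c3/d3 below) that the
+ // corresponding exponent is known.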
+ g3a := new(big.Int).Exp(g, c.smp.a3, p) + h := sha256.New() + + r2 := c.randMPI(randBuf[:]) + r := new(big.Int).Exp(g, r2, p) + c2 := new(big.Int).SetBytes(hashMPIs(h, 1, r)) + d2 := new(big.Int).Mul(c.smp.a2, c2) + d2.Sub(r2, d2) + d2.Mod(d2, q) + if d2.Sign() < 0 { + d2.Add(d2, q) + } + + r3 := c.randMPI(randBuf[:]) + r.Exp(g, r3, p) + c3 := new(big.Int).SetBytes(hashMPIs(h, 2, r)) + d3 := new(big.Int).Mul(c.smp.a3, c3) + d3.Sub(r3, d3) + d3.Mod(d3, q) + if d3.Sign() < 0 { + d3.Add(d3, q) + } + + var ret tlv + if len(question) > 0 { + ret.typ = tlvTypeSMP1WithQuestion + ret.data = append(ret.data, question...) + ret.data = append(ret.data, 0) + } else { + ret.typ = tlvTypeSMP1 + } + ret.data = appendU32(ret.data, 6) + ret.data = appendMPIs(ret.data, g2a, c2, d2, g3a, c3, d3) + return ret +} + +func (c *Conversation) processSMP1(mpis []*big.Int) error { + if len(mpis) != 6 { + return errors.New("otr: incorrect number of arguments in SMP1 message") + } + g2a := mpis[0] + c2 := mpis[1] + d2 := mpis[2] + g3a := mpis[3] + c3 := mpis[4] + d3 := mpis[5] + h := sha256.New() + + r := new(big.Int).Exp(g, d2, p) + s := new(big.Int).Exp(g2a, c2, p) + r.Mul(r, s) + r.Mod(r, p) + t := new(big.Int).SetBytes(hashMPIs(h, 1, r)) + if c2.Cmp(t) != 0 { + return errors.New("otr: ZKP c2 incorrect in SMP1 message") + } + r.Exp(g, d3, p) + s.Exp(g3a, c3, p) + r.Mul(r, s) + r.Mod(r, p) + t.SetBytes(hashMPIs(h, 2, r)) + if c3.Cmp(t) != 0 { + return errors.New("otr: ZKP c3 incorrect in SMP1 message") + } + + c.smp.g2a = g2a + c.smp.g3a = g3a + return nil +} + +func (c *Conversation) generateSMP2() tlv { + var randBuf [16]byte + b2 := c.randMPI(randBuf[:]) + c.smp.b3 = c.randMPI(randBuf[:]) + r2 := c.randMPI(randBuf[:]) + r3 := c.randMPI(randBuf[:]) + r4 := c.randMPI(randBuf[:]) + r5 := c.randMPI(randBuf[:]) + r6 := c.randMPI(randBuf[:]) + + g2b := new(big.Int).Exp(g, b2, p) + g3b := new(big.Int).Exp(g, c.smp.b3, p) + + r := new(big.Int).Exp(g, r2, p) + h := sha256.New() + c2 := new(big.Int).SetBytes(hashMPIs(h, 3, r)) + d2 := new(big.Int).Mul(b2, c2) + d2.Sub(r2, d2) + d2.Mod(d2, q) + if d2.Sign() < 0 { + d2.Add(d2, q) + } + + r.Exp(g, r3, p) + c3 := new(big.Int).SetBytes(hashMPIs(h, 4, r)) + d3 := new(big.Int).Mul(c.smp.b3, c3) + d3.Sub(r3, d3) + d3.Mod(d3, q) + if d3.Sign() < 0 { + d3.Add(d3, q) + } + + c.smp.g2 = new(big.Int).Exp(c.smp.g2a, b2, p) + c.smp.g3 = new(big.Int).Exp(c.smp.g3a, c.smp.b3, p) + c.smp.pb = new(big.Int).Exp(c.smp.g3, r4, p) + c.smp.qb = new(big.Int).Exp(g, r4, p) + r.Exp(c.smp.g2, c.smp.secret, p) + c.smp.qb.Mul(c.smp.qb, r) + c.smp.qb.Mod(c.smp.qb, p) + + s := new(big.Int) + s.Exp(c.smp.g2, r6, p) + r.Exp(g, r5, p) + s.Mul(r, s) + s.Mod(s, p) + r.Exp(c.smp.g3, r5, p) + cp := new(big.Int).SetBytes(hashMPIs(h, 5, r, s)) + + // D5 = r5 - r4 cP mod q and D6 = r6 - y cP mod q + + s.Mul(r4, cp) + r.Sub(r5, s) + d5 := new(big.Int).Mod(r, q) + if d5.Sign() < 0 { + d5.Add(d5, q) + } + + s.Mul(c.smp.secret, cp) + r.Sub(r6, s) + d6 := new(big.Int).Mod(r, q) + if d6.Sign() < 0 { + d6.Add(d6, q) + } + + var ret tlv + ret.typ = tlvTypeSMP2 + ret.data = appendU32(ret.data, 11) + ret.data = appendMPIs(ret.data, g2b, c2, d2, g3b, c3, d3, c.smp.pb, c.smp.qb, cp, d5, d6) + return ret +} + +func (c *Conversation) processSMP2(mpis []*big.Int) (out tlv, err error) { + if len(mpis) != 11 { + err = errors.New("otr: incorrect number of arguments in SMP2 message") + return + } + g2b := mpis[0] + c2 := mpis[1] + d2 := mpis[2] + g3b := mpis[3] + c3 := mpis[4] + d3 := mpis[5] + pb := mpis[6] + qb := mpis[7] + cp 
:= mpis[8] + d5 := mpis[9] + d6 := mpis[10] + h := sha256.New() + + r := new(big.Int).Exp(g, d2, p) + s := new(big.Int).Exp(g2b, c2, p) + r.Mul(r, s) + r.Mod(r, p) + s.SetBytes(hashMPIs(h, 3, r)) + if c2.Cmp(s) != 0 { + err = errors.New("otr: ZKP c2 failed in SMP2 message") + return + } + + r.Exp(g, d3, p) + s.Exp(g3b, c3, p) + r.Mul(r, s) + r.Mod(r, p) + s.SetBytes(hashMPIs(h, 4, r)) + if c3.Cmp(s) != 0 { + err = errors.New("otr: ZKP c3 failed in SMP2 message") + return + } + + c.smp.g2 = new(big.Int).Exp(g2b, c.smp.a2, p) + c.smp.g3 = new(big.Int).Exp(g3b, c.smp.a3, p) + + r.Exp(g, d5, p) + s.Exp(c.smp.g2, d6, p) + r.Mul(r, s) + s.Exp(qb, cp, p) + r.Mul(r, s) + r.Mod(r, p) + + s.Exp(c.smp.g3, d5, p) + t := new(big.Int).Exp(pb, cp, p) + s.Mul(s, t) + s.Mod(s, p) + t.SetBytes(hashMPIs(h, 5, s, r)) + if cp.Cmp(t) != 0 { + err = errors.New("otr: ZKP cP failed in SMP2 message") + return + } + + var randBuf [16]byte + r4 := c.randMPI(randBuf[:]) + r5 := c.randMPI(randBuf[:]) + r6 := c.randMPI(randBuf[:]) + r7 := c.randMPI(randBuf[:]) + + pa := new(big.Int).Exp(c.smp.g3, r4, p) + r.Exp(c.smp.g2, c.smp.secret, p) + qa := new(big.Int).Exp(g, r4, p) + qa.Mul(qa, r) + qa.Mod(qa, p) + + r.Exp(g, r5, p) + s.Exp(c.smp.g2, r6, p) + r.Mul(r, s) + r.Mod(r, p) + + s.Exp(c.smp.g3, r5, p) + cp.SetBytes(hashMPIs(h, 6, s, r)) + + r.Mul(r4, cp) + d5 = new(big.Int).Sub(r5, r) + d5.Mod(d5, q) + if d5.Sign() < 0 { + d5.Add(d5, q) + } + + r.Mul(c.smp.secret, cp) + d6 = new(big.Int).Sub(r6, r) + d6.Mod(d6, q) + if d6.Sign() < 0 { + d6.Add(d6, q) + } + + r.ModInverse(qb, p) + qaqb := new(big.Int).Mul(qa, r) + qaqb.Mod(qaqb, p) + + ra := new(big.Int).Exp(qaqb, c.smp.a3, p) + r.Exp(qaqb, r7, p) + s.Exp(g, r7, p) + cr := new(big.Int).SetBytes(hashMPIs(h, 7, s, r)) + + r.Mul(c.smp.a3, cr) + d7 := new(big.Int).Sub(r7, r) + d7.Mod(d7, q) + if d7.Sign() < 0 { + d7.Add(d7, q) + } + + c.smp.g3b = g3b + c.smp.qaqb = qaqb + + r.ModInverse(pb, p) + c.smp.papb = new(big.Int).Mul(pa, r) + c.smp.papb.Mod(c.smp.papb, p) + c.smp.ra = ra + + out.typ = tlvTypeSMP3 + out.data = appendU32(out.data, 8) + out.data = appendMPIs(out.data, pa, qa, cp, d5, d6, ra, cr, d7) + return +} + +func (c *Conversation) processSMP3(mpis []*big.Int) (out tlv, err error) { + if len(mpis) != 8 { + err = errors.New("otr: incorrect number of arguments in SMP3 message") + return + } + pa := mpis[0] + qa := mpis[1] + cp := mpis[2] + d5 := mpis[3] + d6 := mpis[4] + ra := mpis[5] + cr := mpis[6] + d7 := mpis[7] + h := sha256.New() + + r := new(big.Int).Exp(g, d5, p) + s := new(big.Int).Exp(c.smp.g2, d6, p) + r.Mul(r, s) + s.Exp(qa, cp, p) + r.Mul(r, s) + r.Mod(r, p) + + s.Exp(c.smp.g3, d5, p) + t := new(big.Int).Exp(pa, cp, p) + s.Mul(s, t) + s.Mod(s, p) + t.SetBytes(hashMPIs(h, 6, s, r)) + if t.Cmp(cp) != 0 { + err = errors.New("otr: ZKP cP failed in SMP3 message") + return + } + + r.ModInverse(c.smp.qb, p) + qaqb := new(big.Int).Mul(qa, r) + qaqb.Mod(qaqb, p) + + r.Exp(qaqb, d7, p) + s.Exp(ra, cr, p) + r.Mul(r, s) + r.Mod(r, p) + + s.Exp(g, d7, p) + t.Exp(c.smp.g3a, cr, p) + s.Mul(s, t) + s.Mod(s, p) + t.SetBytes(hashMPIs(h, 7, s, r)) + if t.Cmp(cr) != 0 { + err = errors.New("otr: ZKP cR failed in SMP3 message") + return + } + + var randBuf [16]byte + r7 := c.randMPI(randBuf[:]) + rb := new(big.Int).Exp(qaqb, c.smp.b3, p) + + r.Exp(qaqb, r7, p) + s.Exp(g, r7, p) + cr = new(big.Int).SetBytes(hashMPIs(h, 8, s, r)) + + r.Mul(c.smp.b3, cr) + d7 = new(big.Int).Sub(r7, r) + d7.Mod(d7, q) + if d7.Sign() < 0 { + d7.Add(d7, q) + } + + out.typ = tlvTypeSMP4 + out.data = 
appendU32(out.data, 3) + out.data = appendMPIs(out.data, rb, cr, d7) + + r.ModInverse(c.smp.pb, p) + r.Mul(pa, r) + r.Mod(r, p) + s.Exp(ra, c.smp.b3, p) + if r.Cmp(s) != 0 { + err = smpFailureError + } + + return +} + +func (c *Conversation) processSMP4(mpis []*big.Int) error { + if len(mpis) != 3 { + return errors.New("otr: incorrect number of arguments in SMP4 message") + } + rb := mpis[0] + cr := mpis[1] + d7 := mpis[2] + h := sha256.New() + + r := new(big.Int).Exp(c.smp.qaqb, d7, p) + s := new(big.Int).Exp(rb, cr, p) + r.Mul(r, s) + r.Mod(r, p) + + s.Exp(g, d7, p) + t := new(big.Int).Exp(c.smp.g3b, cr, p) + s.Mul(s, t) + s.Mod(s, p) + t.SetBytes(hashMPIs(h, 8, s, r)) + if t.Cmp(cr) != 0 { + return errors.New("otr: ZKP cR failed in SMP4 message") + } + + r.Exp(rb, c.smp.a3, p) + if r.Cmp(c.smp.papb) != 0 { + return smpFailureError + } + + return nil +} + +func (c *Conversation) generateSMPAbort() tlv { + return tlv{typ: tlvTypeSMPAbort} +} + +func hashMPIs(h hash.Hash, magic byte, mpis ...*big.Int) []byte { + if h != nil { + h.Reset() + } else { + h = sha256.New() + } + + h.Write([]byte{magic}) + for _, mpi := range mpis { + h.Write(appendMPI(nil, mpi)) + } + return h.Sum(nil) +} From 7f02f9e450533170e2470fea90d5b0027515cd49 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Mon, 14 May 2018 16:41:15 +0200 Subject: [PATCH 095/276] Update Golang to 1.10 Signed-off-by: Sebastiaan van Stijn --- Dockerfile | 2 +- circle.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index ac8dbca2..e0d74684 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.8-alpine +FROM golang:1.10-alpine ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution ENV DOCKER_BUILDTAGS include_oss include_gcs diff --git a/circle.yml b/circle.yml index ddc76c86..085ef4f7 100644 --- a/circle.yml +++ b/circle.yml @@ -8,7 +8,7 @@ machine: post: # go - - gvm install go1.8 --prefer-binary --name=stable + - gvm install go1.10 --prefer-binary --name=stable environment: # Convenient shortcuts to "common" locations From 86b030c1cab8b1a7cc27bdbbf5eee8f3a589ded8 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Tue, 29 May 2018 16:02:45 -0700 Subject: [PATCH 096/276] Update project maintainers file Include rules and roles in file. Remove inactive maintainers who have already acknowledged leaving the maintainer role. Signed-off-by: Derek McGowan --- MAINTAINERS | 196 ++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 181 insertions(+), 15 deletions(-) diff --git a/MAINTAINERS b/MAINTAINERS index bda40015..bf0d8eb0 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6,18 +6,194 @@ # It is structured to be consumable by both humans and programs. # To extract its contents programmatically, use any TOML-compliant parser. # -# This file is compiled into the MAINTAINERS file in docker/opensource. -# + +[Rules] + + [Rules.maintainers] + + title = "What is a maintainer?" + + text = """ +There are different types of maintainers, with different responsibilities, but +all maintainers have 3 things in common: + +1) They share responsibility in the project's success. +2) They have made a long-term, recurring time investment to improve the project. +3) They spend that time doing whatever needs to be done, not necessarily what +is the most interesting or fun. + +Maintainers are often under-appreciated, because their work is harder to appreciate. +It's easy to appreciate a really cool and technically advanced feature. 
It's harder +to appreciate the absence of bugs, the slow but steady improvement in stability, +or the reliability of a release process. But those things distinguish a good +project from a great one. +""" + + [Rules.reviewer] + + title = "What is a reviewer?" + + text = """ +A reviewer is a core role within the project. +They share in reviewing issues and pull requests and their LGTM count towards the +required LGTM count to merge a code change into the project. + +Reviewers are part of the organization but do not have write access. +Becoming a reviewer is a core aspect in the journey to becoming a maintainer. +""" + + [Rules.adding-maintainers] + + title = "How are maintainers added?" + + text = """ +Maintainers are first and foremost contributors that have shown they are +committed to the long term success of a project. Contributors wanting to become +maintainers are expected to be deeply involved in contributing code, pull +request review, and triage of issues in the project for more than three months. + +Just contributing does not make you a maintainer, it is about building trust +with the current maintainers of the project and being a person that they can +depend on and trust to make decisions in the best interest of the project. + +Periodically, the existing maintainers curate a list of contributors that have +shown regular activity on the project over the prior months. From this list, +maintainer candidates are selected and proposed on the maintainers mailing list. + +After a candidate has been announced on the maintainers mailing list, the +existing maintainers are given five business days to discuss the candidate, +raise objections and cast their vote. Candidates must be approved by at least 66% of the current maintainers by adding their vote on the mailing +list. Only maintainers of the repository that the candidate is proposed for are +allowed to vote. + +If a candidate is approved, a maintainer will contact the candidate to invite +the candidate to open a pull request that adds the contributor to the +MAINTAINERS file. The candidate becomes a maintainer once the pull request is +merged. +""" + + [Rules.stepping-down-policy] + + title = "Stepping down policy" + + text = """ +Life priorities, interests, and passions can change. If you're a maintainer but +feel you must remove yourself from the list, inform other maintainers that you +intend to step down, and if possible, help find someone to pick up your work. +At the very least, ensure your work can be continued where you left off. + +After you've informed other maintainers, create a pull request to remove +yourself from the MAINTAINERS file. +""" + + [Rules.inactive-maintainers] + + title = "Removal of inactive maintainers" + + text = """ +Similar to the procedure for adding new maintainers, existing maintainers can +be removed from the list if they do not show significant activity on the +project. Periodically, the maintainers review the list of maintainers and their +activity over the last three months. + +If a maintainer has shown insufficient activity over this period, a neutral +person will contact the maintainer to ask if they want to continue being +a maintainer. If the maintainer decides to step down as a maintainer, they +open a pull request to be removed from the MAINTAINERS file. + +If the maintainer wants to remain a maintainer, but is unable to perform the +required duties they can be removed with a vote of at least 66% of +the current maintainers. 
An e-mail is sent to the +mailing list, inviting maintainers of the project to vote. The voting period is +five business days. Issues related to a maintainer's performance should be +discussed with them among the other maintainers so that they are not surprised +by a pull request removing them. +""" + + [Rules.decisions] + + title = "How are decisions made?" + + text = """ +Short answer: EVERYTHING IS A PULL REQUEST. + +distribution is an open-source project with an open design philosophy. This means +that the repository is the source of truth for EVERY aspect of the project, +including its philosophy, design, road map, and APIs. *If it's part of the +project, it's in the repo. If it's in the repo, it's part of the project.* + +As a result, all decisions can be expressed as changes to the repository. An +implementation change is a change to the source code. An API change is a change +to the API specification. A philosophy change is a change to the philosophy +manifesto, and so on. + +All decisions affecting distribution, big and small, follow the same 3 steps: + +* Step 1: Open a pull request. Anyone can do this. + +* Step 2: Discuss the pull request. Anyone can do this. + +* Step 3: Merge or refuse the pull request. Who does this depends on the nature +of the pull request and which areas of the project it affects. +""" + + [Rules.DCO] + + title = "Helping contributors with the DCO" + + text = """ +The [DCO or `Sign your work`]( +https://github.com/moby/moby/blob/master/CONTRIBUTING.md#sign-your-work) +requirement is not intended as a roadblock or speed bump. + +Some distribution contributors are not as familiar with `git`, or have used a web +based editor, and thus asking them to `git commit --amend -s` is not the best +way forward. + +In this case, maintainers can update the commits based on clause (c) of the DCO. +The most trivial way for a contributor to allow the maintainer to do this, is to +add a DCO signature in a pull requests's comment, or a maintainer can simply +note that the change is sufficiently trivial that it does not substantially +change the existing contribution - i.e., a spelling change. + +When you add someone's DCO, please also add your own to keep a log. +""" + + [Rules."no direct push"] + + title = "I'm a maintainer. Should I make pull requests too?" + + text = """ +Yes. Nobody should ever push to master directly. All changes should be +made through a pull request. +""" + + [Rules.tsc] + + title = "Conflict Resolution and technical disputes" + + text = """ +distribution defers to the [Technical Steering Committee](https://github.com/moby/tsc) for escalations and resolution on disputes for technical matters." + """ + + [Rules.meta] + + title = "How is this process changed?" 
+ + text = "Just like everything else: by making a pull request :)" + +# Current project organization [Org] - [Org."Core maintainers"] + + [Org.Maintainers] people = [ "aaronlehmann", "dmcgowan", "dmp42", - "richardscothern", - "shykes", "stevvooe", ] + [Org.Reviewers] + people = [] [people] @@ -42,16 +218,6 @@ Email = "olivier@docker.com" GitHub = "dmp42" - [people.richardscothern] - Name = "Richard Scothern" - Email = "richard.scothern@gmail.com" - GitHub = "richardscothern" - - [people.shykes] - Name = "Solomon Hykes" - Email = "solomon@docker.com" - GitHub = "shykes" - [people.stevvooe] Name = "Stephen Day" Email = "stephen.day@docker.com" From fc7e8f42d7f88215d756f041ec515863b75aa3bc Mon Sep 17 00:00:00 2001 From: Gladkov Alexey Date: Wed, 30 May 2018 18:00:57 +0200 Subject: [PATCH 097/276] Fix deadlock in the inmemory storage driver According golang documentation [1]: no goroutine should expect to be able to acquire a read lock until the initial read lock is released. [1] https://golang.org/pkg/sync/#RWMutex Signed-off-by: Gladkov Alexey --- registry/storage/driver/inmemory/driver.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/registry/storage/driver/inmemory/driver.go b/registry/storage/driver/inmemory/driver.go index e18e2933..4b1f5f48 100644 --- a/registry/storage/driver/inmemory/driver.go +++ b/registry/storage/driver/inmemory/driver.go @@ -73,7 +73,7 @@ func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { d.mutex.RLock() defer d.mutex.RUnlock() - rc, err := d.Reader(ctx, path, 0) + rc, err := d.reader(ctx, path, 0) if err != nil { return nil, err } @@ -108,6 +108,10 @@ func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.Read d.mutex.RLock() defer d.mutex.RUnlock() + return d.reader(ctx, path, offset) +} + +func (d *driver) reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { if offset < 0 { return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} } From 7bc438a5346023c718671fd8b56f40aa9ddd092c Mon Sep 17 00:00:00 2001 From: Oleg Bulatov Date: Mon, 4 Jun 2018 14:22:33 +0200 Subject: [PATCH 098/276] Use e.Message field instead of e.Code.Message() Signed-off-by: Oleg Bulatov --- registry/handlers/app.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/registry/handlers/app.go b/registry/handlers/app.go index a40a4df3..c863a0ad 100644 --- a/registry/handlers/app.go +++ b/registry/handlers/app.go @@ -746,7 +746,7 @@ func (app *App) logError(ctx context.Context, errors errcode.Errors) { case errcode.Error: e, _ := e1.(errcode.Error) c = context.WithValue(ctx, errCodeKey{}, e.Code) - c = context.WithValue(c, errMessageKey{}, e.Code.Message()) + c = context.WithValue(c, errMessageKey{}, e.Message) c = context.WithValue(c, errDetailKey{}, e.Detail) case errcode.ErrorCode: e, _ := e1.(errcode.ErrorCode) From 62797237b93d6e67e4e5879d83420caf92bf3f69 Mon Sep 17 00:00:00 2001 From: Yu Wang Date: Mon, 21 May 2018 12:05:11 -0700 Subject: [PATCH 099/276] closes #2496 and #2552 Update Azure SDK with release v16.2.1 Update Azure autorest SDK with release v10.8.1 Signed-off-by: Yu Wang --- registry/storage/driver/azure/azure.go | 93 +- vendor.conf | 6 +- .../github.com/Azure/azure-sdk-for-go/NOTICE | 5 + .../Azure/azure-sdk-for-go/README.md | 321 +++- .../Azure/azure-sdk-for-go/storage/README.md | 18 + .../azure-sdk-for-go/storage/appendblob.go | 91 ++ .../azure-sdk-for-go/storage/authorization.go | 67 +- .../Azure/azure-sdk-for-go/storage/blob.go | 1252 
+++++---------- .../azure-sdk-for-go/storage/blobsasuri.go | 174 ++ .../storage/blobserviceclient.go | 114 +- .../azure-sdk-for-go/storage/blockblob.go | 270 ++++ .../Azure/azure-sdk-for-go/storage/client.go | 728 +++++++-- .../azure-sdk-for-go/storage/commonsasuri.go | 38 + .../azure-sdk-for-go/storage/container.go | 426 ++++- .../azure-sdk-for-go/storage/copyblob.go | 237 +++ .../azure-sdk-for-go/storage/directory.go | 81 +- .../Azure/azure-sdk-for-go/storage/entity.go | 456 ++++++ .../Azure/azure-sdk-for-go/storage/file.go | 252 +-- .../storage/fileserviceclient.go | 149 +- .../azure-sdk-for-go/storage/leaseblob.go | 201 +++ .../Azure/azure-sdk-for-go/storage/message.go | 170 ++ .../Azure/azure-sdk-for-go/storage/odata.go | 47 + .../azure-sdk-for-go/storage/pageblob.go | 203 +++ .../Azure/azure-sdk-for-go/storage/queue.go | 603 ++++--- .../azure-sdk-for-go/storage/queuesasuri.go | 146 ++ .../storage/queueserviceclient.go | 30 +- .../Azure/azure-sdk-for-go/storage/share.go | 94 +- .../azure-sdk-for-go/storage/storagepolicy.go | 14 + .../storage/storageservice.go | 29 +- .../Azure/azure-sdk-for-go/storage/table.go | 385 +++-- .../azure-sdk-for-go/storage/table_batch.go | 328 ++++ .../storage/table_entities.go | 345 ---- .../storage/tableserviceclient.go | 192 ++- .../Azure/azure-sdk-for-go/storage/util.go | 165 +- .../Azure/azure-sdk-for-go/storage/version.go | 5 - .../Azure/azure-sdk-for-go/version/version.go | 21 + vendor/github.com/Azure/go-autorest/README.md | 29 +- .../Azure/go-autorest/autorest/adal/README.md | 292 ++++ .../Azure/go-autorest/autorest/adal/config.go | 81 + .../autorest/{azure => adal}/devicetoken.go | 141 +- .../autorest/{azure => adal}/persist.go | 16 +- .../Azure/go-autorest/autorest/adal/sender.go | 60 + .../Azure/go-autorest/autorest/adal/token.go | 795 ++++++++++ .../go-autorest/autorest/authorization.go | 259 +++ .../Azure/go-autorest/autorest/autorest.go | 35 + .../Azure/go-autorest/autorest/azure/async.go | 339 +++- .../Azure/go-autorest/autorest/azure/azure.go | 153 +- .../go-autorest/autorest/azure/config.go | 13 - .../autorest/azure/environments.go | 96 +- .../autorest/azure/metadata_environment.go | 245 +++ .../Azure/go-autorest/autorest/azure/rp.go | 200 +++ .../Azure/go-autorest/autorest/azure/token.go | 363 ----- .../Azure/go-autorest/autorest/client.go | 47 +- .../Azure/go-autorest/autorest/date/date.go | 14 + .../Azure/go-autorest/autorest/date/time.go | 14 + .../go-autorest/autorest/date/timerfc1123.go | 14 + .../go-autorest/autorest/date/unixtime.go | 123 ++ .../go-autorest/autorest/date/utility.go | 14 + .../Azure/go-autorest/autorest/error.go | 18 + .../Azure/go-autorest/autorest/preparer.go | 75 +- .../Azure/go-autorest/autorest/responder.go | 14 + .../go-autorest/autorest/retriablerequest.go | 52 + .../autorest/retriablerequest_1.7.go | 54 + .../autorest/retriablerequest_1.8.go | 66 + .../Azure/go-autorest/autorest/sender.go | 95 +- .../Azure/go-autorest/autorest/utility.go | 107 +- .../Azure/go-autorest/autorest/version.go | 31 +- vendor/github.com/marstr/guid/LICENSE.txt | 21 + vendor/github.com/marstr/guid/README.md | 27 + vendor/github.com/marstr/guid/guid.go | 301 ++++ vendor/github.com/satori/go.uuid/LICENSE | 20 + vendor/github.com/satori/go.uuid/README.md | 65 + vendor/github.com/satori/go.uuid/codec.go | 206 +++ vendor/github.com/satori/go.uuid/generator.go | 239 +++ vendor/github.com/satori/go.uuid/sql.go | 78 + vendor/github.com/satori/go.uuid/uuid.go | 161 ++ .../x/crypto/otr/libotr_test_helper.c | 171 ++ 
vendor/golang.org/x/crypto/otr/otr.go | 1400 +++++++++++++++++ vendor/golang.org/x/crypto/otr/smp.go | 572 +++++++ 79 files changed, 12075 insertions(+), 2797 deletions(-) create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/NOTICE create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/README.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/commonsasuri.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/message.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/queuesasuri.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/table_entities.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/version.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/version/version.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/README.md create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/config.go rename vendor/github.com/Azure/go-autorest/autorest/{azure => adal}/devicetoken.go (59%) rename vendor/github.com/Azure/go-autorest/autorest/{azure => adal}/persist.go (74%) create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/sender.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/token.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/authorization.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/config.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/rp.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/token.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go create mode 100644 vendor/github.com/marstr/guid/LICENSE.txt create mode 100644 vendor/github.com/marstr/guid/README.md create mode 100644 vendor/github.com/marstr/guid/guid.go create mode 100644 vendor/github.com/satori/go.uuid/LICENSE create mode 100644 vendor/github.com/satori/go.uuid/README.md create mode 100644 vendor/github.com/satori/go.uuid/codec.go create mode 100644 vendor/github.com/satori/go.uuid/generator.go create mode 100644 vendor/github.com/satori/go.uuid/sql.go create mode 100644 vendor/github.com/satori/go.uuid/uuid.go create mode 100644 vendor/golang.org/x/crypto/otr/libotr_test_helper.c create mode 100644 vendor/golang.org/x/crypto/otr/otr.go create mode 100644 vendor/golang.org/x/crypto/otr/smp.go diff --git a/registry/storage/driver/azure/azure.go 
b/registry/storage/driver/azure/azure.go index 728d8a85..993f43c4 100644 --- a/registry/storage/driver/azure/azure.go +++ b/registry/storage/driver/azure/azure.go @@ -87,7 +87,7 @@ func New(accountName, accountKey, container, realm string) (*Driver, error) { // Create registry container containerRef := blobClient.GetContainerReference(container) - if _, err = containerRef.CreateIfNotExists(); err != nil { + if _, err = containerRef.CreateIfNotExists(nil); err != nil { return nil, err } @@ -104,7 +104,8 @@ func (d *driver) Name() string { // GetContent retrieves the content stored at "path" as a []byte. func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { - blob, err := d.client.GetBlob(d.container, path) + blobRef := d.client.GetContainerReference(d.container).GetBlobReference(path) + blob, err := blobRef.Get(nil) if err != nil { if is404(err) { return nil, storagedriver.PathNotFoundError{Path: path} @@ -118,7 +119,10 @@ func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { // PutContent stores the []byte content at a location designated by "path". func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { - if limit := 64 * 1024 * 1024; len(contents) > limit { // max size for block blobs uploaded via single "Put Blob" + // max size for block blobs uploaded via single "Put Blob" for version after "2016-05-31" + // https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob#remarks + const limit = 256 * 1024 * 1024 + if len(contents) > limit { return fmt.Errorf("uploading %d bytes with PutContent is not supported; limit: %d bytes", len(contents), limit) } @@ -133,41 +137,49 @@ func (d *driver) PutContent(ctx context.Context, path string, contents []byte) e // losing the existing data while migrating it to BlockBlob type. However, // expectation is the clients pushing will be retrying when they get an error // response. - props, err := d.client.GetBlobProperties(d.container, path) + blobRef := d.client.GetContainerReference(d.container).GetBlobReference(path) + err := blobRef.GetProperties(nil) if err != nil && !is404(err) { return fmt.Errorf("failed to get blob properties: %v", err) } - if err == nil && props.BlobType != azure.BlobTypeBlock { - if err := d.client.DeleteBlob(d.container, path, nil); err != nil { - return fmt.Errorf("failed to delete legacy blob (%s): %v", props.BlobType, err) + if err == nil && blobRef.Properties.BlobType != azure.BlobTypeBlock { + if err := blobRef.Delete(nil); err != nil { + return fmt.Errorf("failed to delete legacy blob (%s): %v", blobRef.Properties.BlobType, err) } } r := bytes.NewReader(contents) - return d.client.CreateBlockBlobFromReader(d.container, path, uint64(len(contents)), r, nil) + // reset properties to empty before doing overwrite + blobRef.Properties = azure.BlobProperties{} + return blobRef.CreateBlockBlobFromReader(r, nil) } // Reader retrieves an io.ReadCloser for the content stored at "path" with a // given byte offset. 
func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { - if ok, err := d.client.BlobExists(d.container, path); err != nil { + blobRef := d.client.GetContainerReference(d.container).GetBlobReference(path) + if ok, err := blobRef.Exists(); err != nil { return nil, err } else if !ok { return nil, storagedriver.PathNotFoundError{Path: path} } - info, err := d.client.GetBlobProperties(d.container, path) + err := blobRef.GetProperties(nil) if err != nil { return nil, err } - + info := blobRef.Properties size := int64(info.ContentLength) if offset >= size { return ioutil.NopCloser(bytes.NewReader(nil)), nil } - bytesRange := fmt.Sprintf("%v-", offset) - resp, err := d.client.GetBlobRange(d.container, path, bytesRange, nil) + resp, err := blobRef.GetRange(&azure.GetBlobRangeOptions{ + Range: &azure.BlobRange{ + Start: uint64(offset), + End: 0, + }, + }) if err != nil { return nil, err } @@ -177,20 +189,22 @@ func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.Read // Writer returns a FileWriter which will store the content written to it // at the location designated by "path" after the call to Commit. func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { - blobExists, err := d.client.BlobExists(d.container, path) + blobRef := d.client.GetContainerReference(d.container).GetBlobReference(path) + blobExists, err := blobRef.Exists() if err != nil { return nil, err } var size int64 if blobExists { if append { - blobProperties, err := d.client.GetBlobProperties(d.container, path) + err = blobRef.GetProperties(nil) if err != nil { return nil, err } + blobProperties := blobRef.Properties size = blobProperties.ContentLength } else { - err := d.client.DeleteBlob(d.container, path, nil) + err = blobRef.Delete(nil) if err != nil { return nil, err } @@ -199,7 +213,7 @@ func (d *driver) Writer(ctx context.Context, path string, append bool) (storaged if append { return nil, storagedriver.PathNotFoundError{Path: path} } - err := d.client.PutAppendBlob(d.container, path, nil) + err = blobRef.PutAppendBlob(nil) if err != nil { return nil, err } @@ -211,24 +225,21 @@ func (d *driver) Writer(ctx context.Context, path string, append bool) (storaged // Stat retrieves the FileInfo for the given path, including the current size // in bytes and the creation time. func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + blobRef := d.client.GetContainerReference(d.container).GetBlobReference(path) // Check if the path is a blob - if ok, err := d.client.BlobExists(d.container, path); err != nil { + if ok, err := blobRef.Exists(); err != nil { return nil, err } else if ok { - blob, err := d.client.GetBlobProperties(d.container, path) - if err != nil { - return nil, err - } - - mtim, err := time.Parse(http.TimeFormat, blob.LastModified) + err = blobRef.GetProperties(nil) if err != nil { return nil, err } + blobProperties := blobRef.Properties return storagedriver.FileInfoInternal{FileInfoFields: storagedriver.FileInfoFields{ Path: path, - Size: int64(blob.ContentLength), - ModTime: mtim, + Size: int64(blobProperties.ContentLength), + ModTime: time.Time(blobProperties.LastModified), IsDir: false, }}, nil } @@ -281,8 +292,10 @@ func (d *driver) List(ctx context.Context, path string) ([]string, error) { // Move moves an object stored at sourcePath to destPath, removing the original // object. 
func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { - sourceBlobURL := d.client.GetBlobURL(d.container, sourcePath) - err := d.client.CopyBlob(d.container, destPath, sourceBlobURL) + srcBlobRef := d.client.GetContainerReference(d.container).GetBlobReference(sourcePath) + sourceBlobURL := srcBlobRef.GetURL() + destBlobRef := d.client.GetContainerReference(d.container).GetBlobReference(destPath) + err := destBlobRef.Copy(sourceBlobURL, nil) if err != nil { if is404(err) { return storagedriver.PathNotFoundError{Path: sourcePath} @@ -290,12 +303,13 @@ func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) e return err } - return d.client.DeleteBlob(d.container, sourcePath, nil) + return srcBlobRef.Delete(nil) } // Delete recursively deletes all objects stored at "path" and its subpaths. func (d *driver) Delete(ctx context.Context, path string) error { - ok, err := d.client.DeleteBlobIfExists(d.container, path, nil) + blobRef := d.client.GetContainerReference(d.container).GetBlobReference(path) + ok, err := blobRef.DeleteIfExists(nil) if err != nil { return err } @@ -310,7 +324,8 @@ func (d *driver) Delete(ctx context.Context, path string) error { } for _, b := range blobs { - if err = d.client.DeleteBlob(d.container, b, nil); err != nil { + blobRef = d.client.GetContainerReference(d.container).GetBlobReference(b) + if err = blobRef.Delete(nil); err != nil { return err } } @@ -333,7 +348,15 @@ func (d *driver) URLFor(ctx context.Context, path string, options map[string]int expiresTime = t } } - return d.client.GetBlobSASURI(d.container, path, expiresTime, "r") + blobRef := d.client.GetContainerReference(d.container).GetBlobReference(path) + return blobRef.GetSASURI(azure.BlobSASOptions{ + BlobServiceSASPermissions: azure.BlobServiceSASPermissions{ + Read: true, + }, + SASOptions: azure.SASOptions{ + Expiry: expiresTime, + }, + }) } // Walk traverses a filesystem defined within driver, starting @@ -467,7 +490,8 @@ func (w *writer) Cancel() error { return fmt.Errorf("already committed") } w.cancelled = true - return w.driver.client.DeleteBlob(w.driver.container, w.path, nil) + blobRef := w.driver.client.GetContainerReference(w.driver.container).GetBlobReference(w.path) + return blobRef.Delete(nil) } func (w *writer) Commit() error { @@ -490,12 +514,13 @@ type blockWriter struct { func (bw *blockWriter) Write(p []byte) (int, error) { n := 0 + blobRef := bw.client.GetContainerReference(bw.container).GetBlobReference(bw.path) for offset := 0; offset < len(p); offset += maxChunkSize { chunkSize := maxChunkSize if offset+chunkSize > len(p) { chunkSize = len(p) - offset } - err := bw.client.AppendBlock(bw.container, bw.path, p[offset:offset+chunkSize], nil) + err := blobRef.AppendBlock(p[offset:offset+chunkSize], nil) if err != nil { return n, err } diff --git a/vendor.conf b/vendor.conf index d0ebadf8..b67e03ad 100644 --- a/vendor.conf +++ b/vendor.conf @@ -1,5 +1,5 @@ -github.com/Azure/azure-sdk-for-go 088007b3b08cc02b27f2eadfdcd870958460ce7e -github.com/Azure/go-autorest ec5f4903f77ed9927ac95b19ab8e44ada64c1356 +github.com/Azure/azure-sdk-for-go 4650843026a7fdec254a8d9cf893693a254edd0b +github.com/Azure/go-autorest eaa7994b2278094c904d31993d26f56324db3052 github.com/sirupsen/logrus 3d4380f53a34dcdc95f0c1db702615992b38d9a4 github.com/aws/aws-sdk-go 5bcc0a238d880469f949fc7cd24e35f32ab80cbd github.com/bshuster-repo/logrus-logstash-hook d2c0ecc1836d91814e15e23bb5dc309c3ef51f4a @@ -19,6 +19,8 @@ github.com/gorilla/handlers 
60c7bfde3e33c201519a200a4507a158cc03a17b github.com/gorilla/mux 599cba5e7b6137d46ddf58fb1765f5d928e69604 github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d +github.com/marstr/guid 8bd9a64bf37eb297b492a4101fb28e80ac0b290f +github.com/satori/go.uuid f58768cc1a7a7e77a3bd49e98cdd21419399b6a3 github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c github.com/miekg/dns 271c58e0c14f552178ea321a545ff9af38930f39 github.com/mitchellh/mapstructure 482a9fd5fa83e8c4e7817413b80f3eb8feec03ef diff --git a/vendor/github.com/Azure/azure-sdk-for-go/NOTICE b/vendor/github.com/Azure/azure-sdk-for-go/NOTICE new file mode 100644 index 00000000..2d1d7260 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/NOTICE @@ -0,0 +1,5 @@ +Microsoft Azure-SDK-for-Go +Copyright 2014-2017 Microsoft + +This product includes software developed at +the Microsoft Corporation (https://www.microsoft.com). diff --git a/vendor/github.com/Azure/azure-sdk-for-go/README.md b/vendor/github.com/Azure/azure-sdk-for-go/README.md index 8de42e2d..6d446d74 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/README.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/README.md @@ -1,60 +1,301 @@ -# Microsoft Azure SDK for Go -[![GoDoc](https://godoc.org/github.com/Azure/azure-sdk-for-go?status.svg)](https://godoc.org/github.com/Azure/azure-sdk-for-go) -[![Build Status](https://travis-ci.org/Azure/azure-sdk-for-go.svg?branch=master)](https://travis-ci.org/Azure/azure-sdk-for-go) +# Azure SDK for Go + +[![godoc](https://godoc.org/github.com/Azure/azure-sdk-for-go?status.svg)](https://godoc.org/github.com/Azure/azure-sdk-for-go) +[![Build Status](https://travis-ci.org/Azure/azure-sdk-for-go.svg?branch=master)](https://travis-ci.org/Azure/azure-sdk-for-go) [![Go Report Card](https://goreportcard.com/badge/github.com/Azure/azure-sdk-for-go)](https://goreportcard.com/report/github.com/Azure/azure-sdk-for-go) +azure-sdk-for-go provides Go packages for managing and using Azure services. It has been +tested with Go 1.8, 1.9 and 1.10. -This is Microsoft Azure's core repository for hosting Go packages which offer a more convenient way of targeting Azure -REST endpoints. Here, you'll find a mix of code generated by [Autorest](https://github.com/Azure/autorest) and hand -maintained packages. +To be notified about updates and changes, subscribe to the [Azure update +feed](https://azure.microsoft.com/updates/). -> **NOTE:** This repository is under heavy ongoing development and should be considered a preview. Vendoring your -dependencies is always a good idea, but it is doubly important if you're consuming this library. +Users of the SDK may prefer to jump right in to our samples repo at +[github.com/Azure-Samples/azure-sdk-for-go-samples][samples_repo]. -# Installation -- If you don't already have it, install [the Go Programming Language](https://golang.org/dl/). -- Go get the SDK: +### Build Details -``` -$ go get -u github.com/Azure/azure-sdk-for-go +Most packages in the SDK are generated from [Azure API specs][azure_rest_specs] +using [Azure/autorest.go][] and [Azure/autorest][]. These generated packages +depend on the HTTP client implemented at [Azure/go-autorest][]. 
+
+[azure_rest_specs]: https://github.com/Azure/azure-rest-api-specs
+[Azure/autorest]: https://github.com/Azure/autorest
+[Azure/autorest.go]: https://github.com/Azure/autorest.go
+[Azure/go-autorest]: https://github.com/Azure/go-autorest
+
+The SDK codebase adheres to [semantic versioning](https://semver.org) and thus
+avoids breaking changes other than at major (x.0.0) releases. However,
+occasionally Azure API fixes require breaking updates within an individual
+package; these exceptions are noted in release changelogs.
+
+To more reliably manage dependencies like the Azure SDK in your applications we
+recommend [golang/dep](https://github.com/golang/dep).
+
+# Install and Use:
+
+### Install
+
+```sh
+$ go get -u github.com/Azure/azure-sdk-for-go/...
 ```
-> **IMPORTANT:** We highly suggest vendoring Azure SDK for Go as a dependency. For vendoring dependencies, Azure SDK
-for Go uses [glide](https://github.com/Masterminds/glide).
+or if you use dep, within your repo run:
+
+```sh
+$ dep ensure -add github.com/Azure/azure-sdk-for-go
+```
+
+If you need to install Go, follow [the official instructions](https://golang.org/dl/).
+
+### Use
+
+For complete examples of many scenarios see [Azure-Samples/azure-sdk-for-go-samples][samples_repo].
+
+1. Import a package from the [services][services_dir] directory.
+2. Create and authenticate a client with a `New*Client` func, e.g.
+ `c := compute.NewVirtualMachinesClient(...)`.
+3. Invoke API methods using the client, e.g. `c.CreateOrUpdate(...)`.
+4. Handle responses.
+
+[services_dir]: https://github.com/Azure/azure-sdk-for-go/tree/master/services
+
+For example, to create a new virtual network (substitute your own values for
+strings in angle brackets):
+
+Note: For more on authentication and the `Authorizer` interface see [the next
+ section](#authentication).
+
+```go
+package main
+
+import (
+ "context"
+ "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
+ "github.com/Azure/go-autorest/autorest/azure/auth"
+ "github.com/Azure/go-autorest/autorest/to"
+)
+
+func main() {
+ vnetClient := network.NewVirtualNetworksClient("<subscriptionID>")
+ authorizer, err := auth.NewAuthorizerFromEnvironment()
+
+ if err == nil {
+ vnetClient.Authorizer = authorizer
+ }
+
+ vnetClient.CreateOrUpdate(context.Background(),
+ "<resourceGroupName>",
+ "<vnetName>",
+ network.VirtualNetwork{
+ Location: to.StringPtr("<azureRegion>"),
+ VirtualNetworkPropertiesFormat: &network.VirtualNetworkPropertiesFormat{
+ AddressSpace: &network.AddressSpace{
+ AddressPrefixes: &[]string{"10.0.0.0/8"},
+ },
+ Subnets: &[]network.Subnet{
+ {
+ Name: to.StringPtr("<subnet1Name>"),
+ SubnetPropertiesFormat: &network.SubnetPropertiesFormat{
+ AddressPrefix: to.StringPtr("10.0.0.0/16"),
+ },
+ },
+ {
+ Name: to.StringPtr("<subnet2Name>"),
+ SubnetPropertiesFormat: &network.SubnetPropertiesFormat{
+ AddressPrefix: to.StringPtr("10.1.0.0/16"),
+ },
+ },
+ },
+ },
+ })
+}
+```
+
+### Authentication
+
+Most SDK operations require an OAuth token for authentication and authorization. These are
+made available in the Go SDK For Azure through types implementing the `Authorizer` interface.
+You can get one from Azure Active Directory using the SDK's
+[authentication](https://godoc.org/github.com/Azure/go-autorest/autorest/azure/auth) package. The `Authorizer` returned should
+be set as the authorizer for the resource client, as shown in the [previous section](#use).
+
+You can get an authorizer in the following ways:
+1. From the **Environment**:
+ - Use `auth.NewAuthorizerFromEnvironment()`.
This call will try to get an authorizer based on the environment
+variables with different types of credentials in the following order:
+ 1. **Client Credentials**: Uses the AAD App Secret for auth.
+ - `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate.
+ - `AZURE_CLIENT_ID`: Specifies the app client ID to use.
+ - `AZURE_CLIENT_SECRET`: Specifies the app secret to use.
+ 2. **Client Certificate**: Uses a certificate that was configured on the AAD Service Principal.
+ - `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate.
+ - `AZURE_CLIENT_ID`: Specifies the app client ID to use.
+ - `AZURE_CERTIFICATE_PATH`: Specifies the certificate Path to use.
+ - `AZURE_CERTIFICATE_PASSWORD`: Specifies the certificate password to use.
+ 3. **Username Password**: Uses a username and a password for auth. This is not recommended. Use `Device Flow` Auth instead for user interactive access.
+ - `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate.
+ - `AZURE_CLIENT_ID`: Specifies the app client ID to use.
+ - `AZURE_USERNAME`: Specifies the username to use.
+ - `AZURE_PASSWORD`: Specifies the password to use.
+ 4. **MSI**: Only available for apps running in Azure. No configuration needed as it leverages the fact that the app is running in Azure. See [Azure Managed Service Identity](https://docs.microsoft.com/en-us/azure/active-directory/msi-overview).
+
+ - Optionally, the following environment variables can be defined:
+ - `AZURE_ENVIRONMENT`: Specifies the Azure Environment to use. If not set, it defaults to `AzurePublicCloud`. (Not applicable to MSI based auth)
+ - `AZURE_AD_RESOURCE`: Specifies the AAD resource ID to use. If not set, it defaults to `ResourceManagerEndpoint`, which allows management operations against Azure Resource Manager.
+
+2. From an **Auth File**:
+ - Create a service principal and output the file content using `az ad sp create-for-rbac --sdk-auth` from the Azure CLI. For more details see [az ad sp](https://docs.microsoft.com/en-us/cli/azure/ad/sp).
+ - Set environment variable `AZURE_AUTH_LOCATION` for finding the file.
+ - Use `auth.NewAuthorizerFromFile()` for getting the `Authorizer` based on the auth file.
+
+3. From **Device Flow** by configuring `auth.DeviceFlowConfig` and calling the `Authorizer()` method.
+
+Note: To authenticate you first need to create a service principal in Azure. To create a new service principal, run
+`az ad sp create-for-rbac -n "<app_name>"` in the
+[azure-cli](https://github.com/Azure/azure-cli). See
+[these docs](https://docs.microsoft.com/cli/azure/create-an-azure-service-principal-azure-cli?view=azure-cli-latest)
+for more info. Copy the new principal's ID, secret, and tenant ID for use in your app.
+
+Alternatively, if your apps are running in Azure, you can now leverage the [Managed Service Identity](https://docs.microsoft.com/en-us/azure/active-directory/msi-overview).
 
 # Versioning
 
-## SDK Versions
-The tags in this repository are based on, but do not conform to [SemVer.org's recommendations](http://semver.org/).
-For now, the "-beta" tag is an indicator that we are still in preview and still are planning on releasing some breaking
-changes.
-In repositories that are children of this one, [storage for example](https://github.com/Azure/azure-storage-go), we
-have adopted SemVer.org's recommendations.
+azure-sdk-for-go provides at least a basic Go binding for every Azure API. To
+provide maximum flexibility to users, the SDK even includes previous versions of
+Azure APIs which are still in use.
 
 # Versioning
-## SDK Versions
-The tags in this repository are based on, but do not conform to [SemVer.org's recommendations](http://semver.org/).
-For now, the "-beta" tag is an indicator that we are still in preview and still are planning on releasing some breaking
-changes.
-In repositories that are children of this one, [storage for example](https://github.com/Azure/azure-storage-go), we
-have adopted SemVer.org's recommendations.
+azure-sdk-for-go provides at least a basic Go binding for every Azure API. To
+provide maximum flexibility to users, the SDK even includes previous versions of
+Azure APIs which are still in use. This enables us to support users of the
+most updated Azure datacenters, regional datacenters with earlier APIs, and
+even on-premises installations of Azure Stack.
 
-## Azure Versions
-Azure services _mostly_ do not use SemVer based versions. Rather, they use profiles identified by dates. One will often
-see this casually referred to as an "APIVersion". At the moment, our SDK only supports the most recent profiles. In
-order to lock to an API version, one must also lock to an SDK version. However, as discussed in
-[#517](https://github.com/Azure/azure-sdk-for-go/issues/517), our objective is to reorganize and publish independent
-packages for each profile. In that way, we'll be able to have parallel support in a single SDK version for all
-APIVersions supported by Azure.
+**SDK versions** apply globally and are tracked by git
+[tags](https://github.com/Azure/azure-sdk-for-go/tags). These are in x.y.z form
+and generally adhere to [semantic versioning](https://semver.org) specifications.
 
-# Documentation
+**Service API versions** are generally represented by a date string and are
+tracked by offering separate packages for each version. For example, to choose the
+latest API versions for Compute and Network, use the following imports:
 
-- Azure SDK for Go Documentation is available at [GoDoc.org](http://godoc.org/github.com/Azure/azure-sdk-for-go/).
-- Azure REST APIs used by packages in this repository are documented at [Microsoft Docs, Azure REST](https://docs.microsoft.com/en-us/rest/api/).
-- Azure Services are discussed in detail at [Microsoft Docs, Azure Services](https://docs.microsoft.com/en-us/azure/#pivot=services).
+```go
+import (
+	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
+	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
+)
+```
 
-# License
+Occasionally service-side changes require major changes to existing versions.
+These cases are noted in the changelog.
 
-This project is published under [Apache 2.0 License](LICENSE).
+All available services and versions are listed under the `services/` path in
+this repo and in [GoDoc][services_godoc]. Run `find ./services -type d
+-mindepth 3` to list all available service packages.
 
+[services_godoc]: https://godoc.org/github.com/Azure/azure-sdk-for-go/services
 
-# Contribute
+### Profiles
 
-If you would like to become an active contributor to this project please follow the instructions provided in [Microsoft
-Azure Projects Contribution Guidelines](http://azure.github.io/guidelines/).
+Azure **API profiles** specify subsets of Azure APIs and versions. Profiles can provide:
+
+* **stability** for your application by locking to specific API versions; and/or
+* **compatibility** for your application with Azure Stack and regional Azure datacenters.
+
+In the Go SDK, profiles are available under the `profiles/` path and their
+component API versions are aliases to the true service package under
+`services/`. You can use them as follows:
+
+```go
+import "github.com/Azure/azure-sdk-for-go/profiles/2017-03-09/compute/mgmt/compute"
+import "github.com/Azure/azure-sdk-for-go/profiles/2017-03-09/network/mgmt/network"
+import "github.com/Azure/azure-sdk-for-go/profiles/2017-03-09/storage/mgmt/storage"
+```
+
+The 2017-03-09 profile is the only one currently available and is for use in
+hybrid Azure and Azure Stack environments. More profiles are under development.
+
+In addition to versioned profiles, we also provide two special profiles,
+`latest` and `preview`. These *always* include the most recent respective stable or
+preview API versions for each service, even when updating them to do so causes
+breaking changes. That is, these do *not* adhere to semantic versioning rules.
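+
+Because each API version is a distinct package, standard Go import aliasing
+also lets one program hold two versions of a service while migrating between
+pinned versions; a sketch (the aliases are ours, and the older path is just
+one example of a version available under `services/`):
+
+```go
+import (
+	computeLatest "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
+	computeLegacy "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2016-03-30/compute"
+)
+```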
+The `latest` and `preview` profiles can help you stay up to date with API
+updates as you build applications. Since they are by definition not stable,
+however, they **should not** be used in production apps. Instead, choose the
+latest specific API version (or an older one if necessary) from the `services/`
+path.
+
+As an example, to automatically use the most recent Compute APIs, use one of
+the following imports:
+
+```go
+import "github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute"
+import "github.com/Azure/azure-sdk-for-go/profiles/preview/compute/mgmt/compute"
+```
+
+## Inspecting and Debugging
+
+All clients implement some handy hooks to help inspect the underlying requests being made to Azure.
+
+- `RequestInspector`: View and manipulate the go `http.Request` before it's sent
+- `ResponseInspector`: View the `http.Response` received
+
+Here is an example of how these can be used with `net/http/httputil` to see requests and responses.
+
+```go
+vnetClient := network.NewVirtualNetworksClient("<subscriptionID>")
+vnetClient.RequestInspector = LogRequest()
+vnetClient.ResponseInspector = LogResponse()
+
+...
+
+func LogRequest() autorest.PrepareDecorator {
+	return func(p autorest.Preparer) autorest.Preparer {
+		return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err != nil {
+				log.Println(err)
+			}
+			dump, _ := httputil.DumpRequestOut(r, true)
+			log.Println(string(dump))
+			return r, err
+		})
+	}
+}
+
+func LogResponse() autorest.RespondDecorator {
+	return func(p autorest.Responder) autorest.Responder {
+		return autorest.ResponderFunc(func(r *http.Response) error {
+			err := p.Respond(r)
+			if err != nil {
+				log.Println(err)
+			}
+			dump, _ := httputil.DumpResponse(r, true)
+			log.Println(string(dump))
+			return err
+		})
+	}
+}
+```
+
+# Resources
+
+- SDK docs are at [godoc.org](https://godoc.org/github.com/Azure/azure-sdk-for-go/).
+- SDK samples are at [Azure-Samples/azure-sdk-for-go-samples](https://github.com/Azure-Samples/azure-sdk-for-go-samples).
+- SDK notifications are published via the [Azure update feed](https://azure.microsoft.com/updates/).
+- Azure API docs are at [docs.microsoft.com/rest/api](https://docs.microsoft.com/rest/api/).
+- General Azure docs are at [docs.microsoft.com/azure](https://docs.microsoft.com/azure).
+
+### Other Azure packages for Go
+
+- [Azure Storage Blobs](https://azure.microsoft.com/services/storage/blobs) - [github.com/Azure/azure-storage-blob-go](https://github.com/Azure/azure-storage-blob-go)
+- [Azure Applications Insights](https://azure.microsoft.com/en-us/services/application-insights/) - [github.com/Microsoft/ApplicationInsights-Go](https://github.com/Microsoft/ApplicationInsights-Go)
+
+## License
+
+Apache 2.0, see [LICENSE](./LICENSE).
+
+## Contribute
+
+See [CONTRIBUTING.md](./CONTRIBUTING.md).
+
+[samples_repo]: https://github.com/Azure-Samples/azure-sdk-for-go-samples
 
-This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
-For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact
-[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
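+
+One usage note on the inspector example above: `RequestInspector` and
+`ResponseInspector` are ordinary fields on every generated client, so the
+hooks can be wired up conditionally, for example behind an environment
+variable (`AZURE_SDK_DEBUG` is our own invented name, not an SDK convention;
+`os` is assumed to be imported):
+
+```go
+if os.Getenv("AZURE_SDK_DEBUG") != "" {
+	vnetClient.RequestInspector = LogRequest()
+	vnetClient.ResponseInspector = LogResponse()
+}
+```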
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md b/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md new file mode 100644 index 00000000..49e48cdf --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md @@ -0,0 +1,18 @@ +# Azure Storage SDK for Go (Preview) + +:exclamation: IMPORTANT: This package is in maintenance only and will be deprecated in the +future. Consider using the new package for blobs currently in preview at +[github.com/Azure/azure-storage-blob-go](https://github.com/Azure/azure-storage-blob-go). +New Table, Queue and File packages are also in development. + +The `github.com/Azure/azure-sdk-for-go/storage` package is used to manage +[Azure Storage](https://docs.microsoft.com/en-us/azure/storage/) data plane +resources: containers, blobs, tables, and queues. + +To manage storage *accounts* use Azure Resource Manager (ARM) via the packages +at [github.com/Azure/azure-sdk-for-go/services/storage](https://github.com/Azure/azure-sdk-for-go/tree/master/services/storage). + +This package also supports the [Azure Storage +Emulator](https://azure.microsoft.com/documentation/articles/storage-use-emulator/) +(Windows only). + diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go new file mode 100644 index 00000000..8b5b96d4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go @@ -0,0 +1,91 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "crypto/md5" + "encoding/base64" + "fmt" + "net/http" + "net/url" + "time" +) + +// PutAppendBlob initializes an empty append blob with specified name. An +// append blob must be created using this method before appending blocks. +// +// See CreateBlockBlobFromReader for more info on creating blobs. 
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob +func (b *Blob) PutAppendBlob(options *PutBlobOptions) error { + params := url.Values{} + headers := b.Container.bsc.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypeAppend) + headers = mergeHeaders(headers, headersFromStruct(b.Properties)) + headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) + if err != nil { + return err + } + return b.respondCreation(resp, BlobTypeAppend) +} + +// AppendBlockOptions includes the options for an append block operation +type AppendBlockOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + MaxSize *uint `header:"x-ms-blob-condition-maxsize"` + AppendPosition *uint `header:"x-ms-blob-condition-appendpos"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + RequestID string `header:"x-ms-client-request-id"` + ContentMD5 bool +} + +// AppendBlock appends a block to an append blob. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Append-Block +func (b *Blob) AppendBlock(chunk []byte, options *AppendBlockOptions) error { + params := url.Values{"comp": {"appendblock"}} + headers := b.Container.bsc.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypeAppend) + headers["Content-Length"] = fmt.Sprintf("%v", len(chunk)) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + if options.ContentMD5 { + md5sum := md5.Sum(chunk) + headers[headerContentMD5] = base64.StdEncoding.EncodeToString(md5sum[:]) + } + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, bytes.NewReader(chunk), b.Container.bsc.auth) + if err != nil { + return err + } + return b.respondCreation(resp, BlobTypeAppend) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go index 89a0d0b3..76794c30 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go @@ -1,6 +1,20 @@ // Package storage provides clients for Microsoft Azure Storage Services. package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ import ( "bytes" "fmt" @@ -20,33 +34,39 @@ const ( sharedKeyLiteForTable authentication = "sharedKeyLiteTable" // headers - headerAuthorization = "Authorization" - headerContentLength = "Content-Length" - headerDate = "Date" - headerXmsDate = "x-ms-date" - headerXmsVersion = "x-ms-version" - headerContentEncoding = "Content-Encoding" - headerContentLanguage = "Content-Language" - headerContentType = "Content-Type" - headerContentMD5 = "Content-MD5" - headerIfModifiedSince = "If-Modified-Since" - headerIfMatch = "If-Match" - headerIfNoneMatch = "If-None-Match" - headerIfUnmodifiedSince = "If-Unmodified-Since" - headerRange = "Range" + headerAcceptCharset = "Accept-Charset" + headerAuthorization = "Authorization" + headerContentLength = "Content-Length" + headerDate = "Date" + headerXmsDate = "x-ms-date" + headerXmsVersion = "x-ms-version" + headerContentEncoding = "Content-Encoding" + headerContentLanguage = "Content-Language" + headerContentType = "Content-Type" + headerContentMD5 = "Content-MD5" + headerIfModifiedSince = "If-Modified-Since" + headerIfMatch = "If-Match" + headerIfNoneMatch = "If-None-Match" + headerIfUnmodifiedSince = "If-Unmodified-Since" + headerRange = "Range" + headerDataServiceVersion = "DataServiceVersion" + headerMaxDataServiceVersion = "MaxDataServiceVersion" + headerContentTransferEncoding = "Content-Transfer-Encoding" ) func (c *Client) addAuthorizationHeader(verb, url string, headers map[string]string, auth authentication) (map[string]string, error) { - authHeader, err := c.getSharedKey(verb, url, headers, auth) - if err != nil { - return nil, err + if !c.sasClient { + authHeader, err := c.getSharedKey(verb, url, headers, auth) + if err != nil { + return nil, err + } + headers[headerAuthorization] = authHeader } - headers[headerAuthorization] = authHeader return headers, nil } func (c *Client) getSharedKey(verb, url string, headers map[string]string, auth authentication) (string, error) { - canRes, err := c.buildCanonicalizedResource(url, auth) + canRes, err := c.buildCanonicalizedResource(url, auth, false) if err != nil { return "", err } @@ -58,15 +78,18 @@ func (c *Client) getSharedKey(verb, url string, headers map[string]string, auth return c.createAuthorizationHeader(canString, auth), nil } -func (c *Client) buildCanonicalizedResource(uri string, auth authentication) (string, error) { +func (c *Client) buildCanonicalizedResource(uri string, auth authentication, sas bool) (string, error) { errMsg := "buildCanonicalizedResource error: %s" u, err := url.Parse(uri) if err != nil { return "", fmt.Errorf(errMsg, err.Error()) } - cr := bytes.NewBufferString("/") - cr.WriteString(c.getCanonicalizedAccountName()) + cr := bytes.NewBufferString("") + if c.accountName != StorageEmulatorAccountName || !sas { + cr.WriteString("/") + cr.WriteString(c.getCanonicalizedAccountName()) + } if len(u.Path) > 0 { // Any portion of the CanonicalizedResource string that is derived from diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go index 6b332ea4..1d224862 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go @@ -1,7 +1,20 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 import (
-	"bytes"
 	"encoding/xml"
 	"errors"
 	"fmt"
@@ -15,11 +28,26 @@ import (
 
 // A Blob is an entry in BlobListResponse.
 type Blob struct {
+	Container  *Container
 	Name       string         `xml:"Name"`
+	Snapshot   time.Time      `xml:"Snapshot"`
 	Properties BlobProperties `xml:"Properties"`
 	Metadata   BlobMetadata   `xml:"Metadata"`
 }
 
+// PutBlobOptions includes the options for any put blob operation
+// (page, block, append)
+type PutBlobOptions struct {
+	Timeout           uint
+	LeaseID           string     `header:"x-ms-lease-id"`
+	Origin            string     `header:"Origin"`
+	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
+	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
+	IfMatch           string     `header:"If-Match"`
+	IfNoneMatch       string     `header:"If-None-Match"`
+	RequestID         string     `header:"x-ms-client-request-id"`
+}
+
 // BlobMetadata is a set of custom name/value pairs.
 //
 // See https://msdn.microsoft.com/en-us/library/azure/dd179404.aspx
@@ -67,34 +95,28 @@ func (bm BlobMetadata) MarshalXML(enc *xml.Encoder, start xml.StartElement) erro
 // BlobProperties contains various properties of a blob
 // returned in various endpoints like ListBlobs or GetBlobProperties.
 type BlobProperties struct {
-	LastModified          string   `xml:"Last-Modified"`
-	Etag                  string   `xml:"Etag"`
-	ContentMD5            string   `xml:"Content-MD5"`
-	ContentLength         int64    `xml:"Content-Length"`
-	ContentType           string   `xml:"Content-Type"`
-	ContentEncoding       string   `xml:"Content-Encoding"`
-	CacheControl          string   `xml:"Cache-Control"`
-	ContentLanguage       string   `xml:"Cache-Language"`
-	BlobType              BlobType `xml:"x-ms-blob-blob-type"`
-	SequenceNumber        int64    `xml:"x-ms-blob-sequence-number"`
-	CopyID                string   `xml:"CopyId"`
-	CopyStatus            string   `xml:"CopyStatus"`
-	CopySource            string   `xml:"CopySource"`
-	CopyProgress          string   `xml:"CopyProgress"`
-	CopyCompletionTime    string   `xml:"CopyCompletionTime"`
-	CopyStatusDescription string   `xml:"CopyStatusDescription"`
-	LeaseStatus           string   `xml:"LeaseStatus"`
-	LeaseState            string   `xml:"LeaseState"`
-}
-
-// BlobHeaders contains various properties of a blob and is an entry
-// in SetBlobProperties
-type BlobHeaders struct {
-	ContentMD5      string `header:"x-ms-blob-content-md5"`
-	ContentLanguage string `header:"x-ms-blob-content-language"`
-	ContentEncoding string `header:"x-ms-blob-content-encoding"`
-	ContentType     string `header:"x-ms-blob-content-type"`
-	CacheControl    string `header:"x-ms-blob-cache-control"`
+	LastModified          TimeRFC1123 `xml:"Last-Modified"`
+	Etag                  string      `xml:"Etag"`
+	ContentMD5            string      `xml:"Content-MD5" header:"x-ms-blob-content-md5"`
+	ContentLength         int64       `xml:"Content-Length"`
+	ContentType           string      `xml:"Content-Type" header:"x-ms-blob-content-type"`
+	ContentEncoding       string      `xml:"Content-Encoding" header:"x-ms-blob-content-encoding"`
+	CacheControl          string      `xml:"Cache-Control" header:"x-ms-blob-cache-control"`
+	ContentLanguage       string      `xml:"Cache-Language" header:"x-ms-blob-content-language"`
+	ContentDisposition    string      `xml:"Content-Disposition" header:"x-ms-blob-content-disposition"`
+	BlobType              BlobType    `xml:"BlobType"`
+	SequenceNumber        int64       `xml:"x-ms-blob-sequence-number"`
+	CopyID                string      `xml:"CopyId"`
+	CopyStatus            string 
`xml:"CopyStatus"` + CopySource string `xml:"CopySource"` + CopyProgress string `xml:"CopyProgress"` + CopyCompletionTime TimeRFC1123 `xml:"CopyCompletionTime"` + CopyStatusDescription string `xml:"CopyStatusDescription"` + LeaseStatus string `xml:"LeaseStatus"` + LeaseState string `xml:"LeaseState"` + LeaseDuration string `xml:"LeaseDuration"` + ServerEncrypted bool `xml:"ServerEncrypted"` + IncrementalCopy bool `xml:"IncrementalCopy"` } // BlobType defines the type of the Azure Blob. @@ -107,460 +129,356 @@ const ( BlobTypeAppend BlobType = "AppendBlob" ) -// PageWriteType defines the type updates that are going to be -// done on the page blob. -type PageWriteType string - -// Types of operations on page blobs -const ( - PageWriteTypeUpdate PageWriteType = "update" - PageWriteTypeClear PageWriteType = "clear" -) - -const ( - blobCopyStatusPending = "pending" - blobCopyStatusSuccess = "success" - blobCopyStatusAborted = "aborted" - blobCopyStatusFailed = "failed" -) - -// lease constants. -const ( - leaseHeaderPrefix = "x-ms-lease-" - headerLeaseID = "x-ms-lease-id" - leaseAction = "x-ms-lease-action" - leaseBreakPeriod = "x-ms-lease-break-period" - leaseDuration = "x-ms-lease-duration" - leaseProposedID = "x-ms-proposed-lease-id" - leaseTime = "x-ms-lease-time" - - acquireLease = "acquire" - renewLease = "renew" - changeLease = "change" - releaseLease = "release" - breakLease = "break" -) - -// BlockListType is used to filter out types of blocks in a Get Blocks List call -// for a block blob. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx for all -// block types. -type BlockListType string - -// Filters for listing blocks in block blobs -const ( - BlockListTypeAll BlockListType = "all" - BlockListTypeCommitted BlockListType = "committed" - BlockListTypeUncommitted BlockListType = "uncommitted" -) - -// Maximum sizes (per REST API) for various concepts -const ( - MaxBlobBlockSize = 4 * 1024 * 1024 - MaxBlobPageSize = 4 * 1024 * 1024 -) - -// BlockStatus defines states a block for a block blob can -// be in. -type BlockStatus string - -// List of statuses that can be used to refer to a block in a block list -const ( - BlockStatusUncommitted BlockStatus = "Uncommitted" - BlockStatusCommitted BlockStatus = "Committed" - BlockStatusLatest BlockStatus = "Latest" -) - -// Block is used to create Block entities for Put Block List -// call. -type Block struct { - ID string - Status BlockStatus +func (b *Blob) buildPath() string { + return b.Container.buildPath() + "/" + b.Name } -// BlockListResponse contains the response fields from Get Block List call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx -type BlockListResponse struct { - XMLName xml.Name `xml:"BlockList"` - CommittedBlocks []BlockResponse `xml:"CommittedBlocks>Block"` - UncommittedBlocks []BlockResponse `xml:"UncommittedBlocks>Block"` -} - -// BlockResponse contains the block information returned -// in the GetBlockListCall. -type BlockResponse struct { - Name string `xml:"Name"` - Size int64 `xml:"Size"` -} - -// GetPageRangesResponse contains the response fields from -// Get Page Ranges call. -// -// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx -type GetPageRangesResponse struct { - XMLName xml.Name `xml:"PageList"` - PageList []PageRange `xml:"PageRange"` -} - -// PageRange contains information about a page of a page blob from -// Get Pages Range call. 
-//
-// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx
-type PageRange struct {
-	Start int64 `xml:"Start"`
-	End   int64 `xml:"End"`
-}
-
-var (
-	errBlobCopyAborted    = errors.New("storage: blob copy is aborted")
-	errBlobCopyIDMismatch = errors.New("storage: blob copy id is a mismatch")
-)
-
-// BlobExists returns true if a blob with given name exists on the specified
+// Exists returns true if a blob with given name exists on the specified
 // container of the storage account.
-func (b BlobStorageClient) BlobExists(container, name string) (bool, error) {
-	uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})
-	headers := b.client.getStandardHeaders()
-	resp, err := b.client.exec(http.MethodHead, uri, headers, nil, b.auth)
+func (b *Blob) Exists() (bool, error) {
+	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), nil)
+	headers := b.Container.bsc.client.getStandardHeaders()
+	resp, err := b.Container.bsc.client.exec(http.MethodHead, uri, headers, nil, b.Container.bsc.auth)
 	if resp != nil {
-		defer readAndCloseBody(resp.body)
-		if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound {
-			return resp.statusCode == http.StatusOK, nil
+		defer drainRespBody(resp)
+		if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNotFound {
+			return resp.StatusCode == http.StatusOK, nil
 		}
 	}
 	return false, err
 }
 
-// GetBlobURL gets the canonical URL to the blob with the specified name in the
-// specified container. If name is not specified, the canonical URL for the entire
-// container is obtained.
+// GetURL gets the canonical URL to the blob with the specified name in the
+// specified container.
 // This method does not create a publicly accessible URL if the blob or container
 // is private and this method does not check if the blob exists.
-func (b BlobStorageClient) GetBlobURL(container, name string) string {
+func (b *Blob) GetURL() string {
+	container := b.Container.Name
 	if container == "" {
 		container = "$root"
 	}
-	return b.client.getEndpoint(blobServiceName, pathForResource(container, name), url.Values{})
+	return b.Container.bsc.client.getEndpoint(blobServiceName, pathForResource(container, b.Name), nil)
 }
 
-// GetBlob returns a stream to read the blob. Caller must call Close() the
-// reader to close on the underlying connection.
+// GetBlobRangeOptions includes the options for a get blob range operation
+type GetBlobRangeOptions struct {
+	Range              *BlobRange
+	GetRangeContentMD5 bool
+	*GetBlobOptions
+}
+
+// GetBlobOptions includes the options for a get blob operation
+type GetBlobOptions struct {
+	Timeout           uint
+	Snapshot          *time.Time
+	LeaseID           string     `header:"x-ms-lease-id"`
+	Origin            string     `header:"Origin"`
+	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
+	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
+	IfMatch           string     `header:"If-Match"`
+	IfNoneMatch       string     `header:"If-None-Match"`
+	RequestID         string     `header:"x-ms-client-request-id"`
+}
+
+// BlobRange represents the byte range to get
+type BlobRange struct {
+	Start uint64
+	End   uint64
+}
+
+func (br BlobRange) String() string {
+	if br.End == 0 {
+		return fmt.Sprintf("bytes=%d-", br.Start)
+	}
+	return fmt.Sprintf("bytes=%d-%d", br.Start, br.End)
+}
+
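+// For illustration, String renders a BlobRange as an HTTP Range header value,
+// with End == 0 treated as "from Start to the end of the blob":
+//
+//	BlobRange{Start: 0, End: 1023}.String() // "bytes=0-1023"
+//	BlobRange{Start: 1024}.String()         // "bytes=1024-"
+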
+// Get returns a stream to read the blob. Caller must call both Read and Close()
+// to correctly close the underlying connection.
 //
-// See https://msdn.microsoft.com/en-us/library/azure/dd179440.aspx
-func (b BlobStorageClient) GetBlob(container, name string) (io.ReadCloser, error) {
-	resp, err := b.getBlobRange(container, name, "", nil)
+// See the GetRange method for use with a Range header.
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Blob
+func (b *Blob) Get(options *GetBlobOptions) (io.ReadCloser, error) {
+	rangeOptions := GetBlobRangeOptions{
+		GetBlobOptions: options,
+	}
+	resp, err := b.getRange(&rangeOptions)
 	if err != nil {
 		return nil, err
 	}
 
-	if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
+	if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
 		return nil, err
 	}
-	return resp.body, nil
+	if err := b.writeProperties(resp.Header, true); err != nil {
+		return resp.Body, err
+	}
+	return resp.Body, nil
 }
 
-// GetBlobRange reads the specified range of a blob to a stream. The bytesRange
+// GetRange reads the specified range of a blob to a stream. The bytesRange
 // string must be in a format like "0-", "10-100" as defined in HTTP 1.1 spec.
-//
-// See https://msdn.microsoft.com/en-us/library/azure/dd179440.aspx
-func (b BlobStorageClient) GetBlobRange(container, name, bytesRange string, extraHeaders map[string]string) (io.ReadCloser, error) {
-	resp, err := b.getBlobRange(container, name, bytesRange, extraHeaders)
+// Caller must call both Read and Close() to correctly close the underlying
+// connection.
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Blob
+func (b *Blob) GetRange(options *GetBlobRangeOptions) (io.ReadCloser, error) {
+	resp, err := b.getRange(options)
 	if err != nil {
 		return nil, err
 	}
 
-	if err := checkRespCode(resp.statusCode, []int{http.StatusPartialContent}); err != nil {
+	if err := checkRespCode(resp, []int{http.StatusPartialContent}); err != nil {
 		return nil, err
 	}
-	return resp.body, nil
+	// Content-Length header should not be updated, as the service returns the range length
+	// (which is not always the full blob length)
+	if err := b.writeProperties(resp.Header, false); err != nil {
+		return resp.Body, err
+	}
+	return resp.Body, nil
 }
 
-func (b BlobStorageClient) getBlobRange(container, name, bytesRange string, extraHeaders map[string]string) (*storageResponse, error) {
-	uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})
+func (b *Blob) getRange(options *GetBlobRangeOptions) (*http.Response, error) {
+	params := url.Values{}
+	headers := b.Container.bsc.client.getStandardHeaders()
 
-	extraHeaders = b.client.protectUserAgent(extraHeaders)
-	headers := b.client.getStandardHeaders()
-	if bytesRange != "" {
-		headers["Range"] = fmt.Sprintf("bytes=%s", bytesRange)
+	if options != nil {
+		if options.Range != nil {
+			headers["Range"] = options.Range.String()
+			if options.GetRangeContentMD5 {
+				headers["x-ms-range-get-content-md5"] = "true"
+			}
+		}
+		if options.GetBlobOptions != nil {
+			headers = mergeHeaders(headers, headersFromStruct(*options.GetBlobOptions))
+			params = addTimeout(params, options.Timeout)
+			params = addSnapshot(params, options.Snapshot)
+		}
 	}
+	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
 
-	for k, v := range extraHeaders {
-		headers[k] = v
-	}
-
-	resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth)
+	resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth)
 	if err != nil {
 		return nil, err
 	}
 	return resp, err
 }
 
-// leasePut 
is common PUT code for the various acquire/release/break etc functions. -func (b BlobStorageClient) leaseCommonPut(container string, name string, headers map[string]string, expectedStatus int) (http.Header, error) { - params := url.Values{"comp": {"lease"}} - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) - - resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth) - if err != nil { - return nil, err - } - defer readAndCloseBody(resp.body) - - if err := checkRespCode(resp.statusCode, []int{expectedStatus}); err != nil { - return nil, err - } - - return resp.headers, nil +// SnapshotOptions includes the options for a snapshot blob operation +type SnapshotOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + RequestID string `header:"x-ms-client-request-id"` } -// SnapshotBlob creates a snapshot for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691971.aspx -func (b BlobStorageClient) SnapshotBlob(container string, name string, timeout int, extraHeaders map[string]string) (snapshotTimestamp *time.Time, err error) { - extraHeaders = b.client.protectUserAgent(extraHeaders) - headers := b.client.getStandardHeaders() +// CreateSnapshot creates a snapshot for a blob +// See https://msdn.microsoft.com/en-us/library/azure/ee691971.aspx +func (b *Blob) CreateSnapshot(options *SnapshotOptions) (snapshotTimestamp *time.Time, err error) { params := url.Values{"comp": {"snapshot"}} + headers := b.Container.bsc.client.getStandardHeaders() + headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) - if timeout > 0 { - params.Add("timeout", strconv.Itoa(timeout)) + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - for k, v := range extraHeaders { - headers[k] = v - } - - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) - resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth) + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) if err != nil || resp == nil { return nil, err } + defer drainRespBody(resp) - defer readAndCloseBody(resp.body) - - if err := checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil { + if err := checkRespCode(resp, []int{http.StatusCreated}); err != nil { return nil, err } - snapshotResponse := resp.headers.Get(http.CanonicalHeaderKey("x-ms-snapshot")) + snapshotResponse := resp.Header.Get(http.CanonicalHeaderKey("x-ms-snapshot")) if snapshotResponse != "" { snapshotTimestamp, err := time.Parse(time.RFC3339, snapshotResponse) if err != nil { return nil, err } - return &snapshotTimestamp, nil } return nil, errors.New("Snapshot not created") } -// AcquireLease creates a lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx -// returns leaseID acquired -// In API Versions starting on 2012-02-12, the minimum leaseTimeInSeconds is 15, the maximum -// non-infinite leaseTimeInSeconds is 60. To specify an infinite lease, provide the value -1. 
-func (b BlobStorageClient) AcquireLease(container string, name string, leaseTimeInSeconds int, proposedLeaseID string) (returnedLeaseID string, err error) { - headers := b.client.getStandardHeaders() - headers[leaseAction] = acquireLease - - if leaseTimeInSeconds == -1 { - // Do nothing, but don't trigger the following clauses. - } else if leaseTimeInSeconds > 60 || b.client.apiVersion < "2012-02-12" { - leaseTimeInSeconds = 60 - } else if leaseTimeInSeconds < 15 { - leaseTimeInSeconds = 15 - } - - headers[leaseDuration] = strconv.Itoa(leaseTimeInSeconds) - - if proposedLeaseID != "" { - headers[leaseProposedID] = proposedLeaseID - } - - respHeaders, err := b.leaseCommonPut(container, name, headers, http.StatusCreated) - if err != nil { - return "", err - } - - returnedLeaseID = respHeaders.Get(http.CanonicalHeaderKey(headerLeaseID)) - - if returnedLeaseID != "" { - return returnedLeaseID, nil - } - - return "", errors.New("LeaseID not returned") +// GetBlobPropertiesOptions includes the options for a get blob properties operation +type GetBlobPropertiesOptions struct { + Timeout uint + Snapshot *time.Time + LeaseID string `header:"x-ms-lease-id"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + RequestID string `header:"x-ms-client-request-id"` } -// BreakLease breaks the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx -// Returns the timeout remaining in the lease in seconds -func (b BlobStorageClient) BreakLease(container string, name string) (breakTimeout int, err error) { - headers := b.client.getStandardHeaders() - headers[leaseAction] = breakLease - return b.breakLeaseCommon(container, name, headers) -} +// GetProperties provides various information about the specified blob. +// See https://msdn.microsoft.com/en-us/library/azure/dd179394.aspx +func (b *Blob) GetProperties(options *GetBlobPropertiesOptions) error { + params := url.Values{} + headers := b.Container.bsc.client.getStandardHeaders() -// BreakLeaseWithBreakPeriod breaks the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx -// breakPeriodInSeconds is used to determine how long until new lease can be created. 
-// Returns the timeout remaining in the lease in seconds -func (b BlobStorageClient) BreakLeaseWithBreakPeriod(container string, name string, breakPeriodInSeconds int) (breakTimeout int, err error) { - headers := b.client.getStandardHeaders() - headers[leaseAction] = breakLease - headers[leaseBreakPeriod] = strconv.Itoa(breakPeriodInSeconds) - return b.breakLeaseCommon(container, name, headers) -} - -// breakLeaseCommon is common code for both version of BreakLease (with and without break period) -func (b BlobStorageClient) breakLeaseCommon(container string, name string, headers map[string]string) (breakTimeout int, err error) { - - respHeaders, err := b.leaseCommonPut(container, name, headers, http.StatusAccepted) - if err != nil { - return 0, err + if options != nil { + params = addTimeout(params, options.Timeout) + params = addSnapshot(params, options.Snapshot) + headers = mergeHeaders(headers, headersFromStruct(*options)) } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - breakTimeoutStr := respHeaders.Get(http.CanonicalHeaderKey(leaseTime)) - if breakTimeoutStr != "" { - breakTimeout, err = strconv.Atoi(breakTimeoutStr) - if err != nil { - return 0, err - } - } - - return breakTimeout, nil -} - -// ChangeLease changes a lease ID for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx -// Returns the new LeaseID acquired -func (b BlobStorageClient) ChangeLease(container string, name string, currentLeaseID string, proposedLeaseID string) (newLeaseID string, err error) { - headers := b.client.getStandardHeaders() - headers[leaseAction] = changeLease - headers[headerLeaseID] = currentLeaseID - headers[leaseProposedID] = proposedLeaseID - - respHeaders, err := b.leaseCommonPut(container, name, headers, http.StatusOK) - if err != nil { - return "", err - } - - newLeaseID = respHeaders.Get(http.CanonicalHeaderKey(headerLeaseID)) - if newLeaseID != "" { - return newLeaseID, nil - } - - return "", errors.New("LeaseID not returned") -} - -// ReleaseLease releases the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx -func (b BlobStorageClient) ReleaseLease(container string, name string, currentLeaseID string) error { - headers := b.client.getStandardHeaders() - headers[leaseAction] = releaseLease - headers[headerLeaseID] = currentLeaseID - - _, err := b.leaseCommonPut(container, name, headers, http.StatusOK) + resp, err := b.Container.bsc.client.exec(http.MethodHead, uri, headers, nil, b.Container.bsc.auth) if err != nil { return err } + defer drainRespBody(resp) - return nil -} - -// RenewLease renews the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx -func (b BlobStorageClient) RenewLease(container string, name string, currentLeaseID string) error { - headers := b.client.getStandardHeaders() - headers[leaseAction] = renewLease - headers[headerLeaseID] = currentLeaseID - - _, err := b.leaseCommonPut(container, name, headers, http.StatusOK) - if err != nil { + if err = checkRespCode(resp, []int{http.StatusOK}); err != nil { return err } - - return nil + return b.writeProperties(resp.Header, true) } -// GetBlobProperties provides various information about the specified -// blob. 
See https://msdn.microsoft.com/en-us/library/azure/dd179394.aspx -func (b BlobStorageClient) GetBlobProperties(container, name string) (*BlobProperties, error) { - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) +func (b *Blob) writeProperties(h http.Header, includeContentLen bool) error { + var err error - headers := b.client.getStandardHeaders() - resp, err := b.client.exec(http.MethodHead, uri, headers, nil, b.auth) - if err != nil { - return nil, err - } - defer readAndCloseBody(resp.body) - - if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return nil, err - } - - var contentLength int64 - contentLengthStr := resp.headers.Get("Content-Length") - if contentLengthStr != "" { - contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64) - if err != nil { - return nil, err + contentLength := b.Properties.ContentLength + if includeContentLen { + contentLengthStr := h.Get("Content-Length") + if contentLengthStr != "" { + contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64) + if err != nil { + return err + } } } var sequenceNum int64 - sequenceNumStr := resp.headers.Get("x-ms-blob-sequence-number") + sequenceNumStr := h.Get("x-ms-blob-sequence-number") if sequenceNumStr != "" { sequenceNum, err = strconv.ParseInt(sequenceNumStr, 0, 64) if err != nil { - return nil, err + return err } } - return &BlobProperties{ - LastModified: resp.headers.Get("Last-Modified"), - Etag: resp.headers.Get("Etag"), - ContentMD5: resp.headers.Get("Content-MD5"), + lastModified, err := getTimeFromHeaders(h, "Last-Modified") + if err != nil { + return err + } + + copyCompletionTime, err := getTimeFromHeaders(h, "x-ms-copy-completion-time") + if err != nil { + return err + } + + b.Properties = BlobProperties{ + LastModified: TimeRFC1123(*lastModified), + Etag: h.Get("Etag"), + ContentMD5: h.Get("Content-MD5"), ContentLength: contentLength, - ContentEncoding: resp.headers.Get("Content-Encoding"), - ContentType: resp.headers.Get("Content-Type"), - CacheControl: resp.headers.Get("Cache-Control"), - ContentLanguage: resp.headers.Get("Content-Language"), + ContentEncoding: h.Get("Content-Encoding"), + ContentType: h.Get("Content-Type"), + ContentDisposition: h.Get("Content-Disposition"), + CacheControl: h.Get("Cache-Control"), + ContentLanguage: h.Get("Content-Language"), SequenceNumber: sequenceNum, - CopyCompletionTime: resp.headers.Get("x-ms-copy-completion-time"), - CopyStatusDescription: resp.headers.Get("x-ms-copy-status-description"), - CopyID: resp.headers.Get("x-ms-copy-id"), - CopyProgress: resp.headers.Get("x-ms-copy-progress"), - CopySource: resp.headers.Get("x-ms-copy-source"), - CopyStatus: resp.headers.Get("x-ms-copy-status"), - BlobType: BlobType(resp.headers.Get("x-ms-blob-type")), - LeaseStatus: resp.headers.Get("x-ms-lease-status"), - LeaseState: resp.headers.Get("x-ms-lease-state"), - }, nil + CopyCompletionTime: TimeRFC1123(*copyCompletionTime), + CopyStatusDescription: h.Get("x-ms-copy-status-description"), + CopyID: h.Get("x-ms-copy-id"), + CopyProgress: h.Get("x-ms-copy-progress"), + CopySource: h.Get("x-ms-copy-source"), + CopyStatus: h.Get("x-ms-copy-status"), + BlobType: BlobType(h.Get("x-ms-blob-type")), + LeaseStatus: h.Get("x-ms-lease-status"), + LeaseState: h.Get("x-ms-lease-state"), + } + b.writeMetadata(h) + return nil } -// SetBlobProperties replaces the BlobHeaders for the specified blob. 
+// SetBlobPropertiesOptions contains various properties of a blob and is an entry +// in SetProperties +type SetBlobPropertiesOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + Origin string `header:"Origin"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + SequenceNumberAction *SequenceNumberAction + RequestID string `header:"x-ms-client-request-id"` +} + +// SequenceNumberAction defines how the blob's sequence number should be modified +type SequenceNumberAction string + +// Options for sequence number action +const ( + SequenceNumberActionMax SequenceNumberAction = "max" + SequenceNumberActionUpdate SequenceNumberAction = "update" + SequenceNumberActionIncrement SequenceNumberAction = "increment" +) + +// SetProperties replaces the BlobHeaders for the specified blob. // // Some keys may be converted to Camel-Case before sending. All keys // are returned in lower case by GetBlobProperties. HTTP header names // are case-insensitive so case munging should not matter to other // applications either. // -// See https://msdn.microsoft.com/en-us/library/azure/ee691966.aspx -func (b BlobStorageClient) SetBlobProperties(container, name string, blobHeaders BlobHeaders) error { +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Blob-Properties +func (b *Blob) SetProperties(options *SetBlobPropertiesOptions) error { params := url.Values{"comp": {"properties"}} - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) - headers := b.client.getStandardHeaders() + headers := b.Container.bsc.client.getStandardHeaders() + headers = mergeHeaders(headers, headersFromStruct(b.Properties)) - extraHeaders := headersFromStruct(blobHeaders) + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - for k, v := range extraHeaders { - headers[k] = v + if b.Properties.BlobType == BlobTypePage { + headers = addToHeaders(headers, "x-ms-blob-content-length", fmt.Sprintf("%v", b.Properties.ContentLength)) + if options != nil && options.SequenceNumberAction != nil { + headers = addToHeaders(headers, "x-ms-sequence-number-action", string(*options.SequenceNumberAction)) + if *options.SequenceNumberAction != SequenceNumberActionIncrement { + headers = addToHeaders(headers, "x-ms-blob-sequence-number", fmt.Sprintf("%v", b.Properties.SequenceNumber)) + } + } } - resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth) + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) if err != nil { return err } - defer readAndCloseBody(resp.body) - - return checkRespCode(resp.statusCode, []int{http.StatusOK}) + defer drainRespBody(resp) + return checkRespCode(resp, []int{http.StatusOK}) } -// SetBlobMetadata replaces the metadata for the specified blob. 
+// SetBlobMetadataOptions includes the options for a set blob metadata operation +type SetBlobMetadataOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + RequestID string `header:"x-ms-client-request-id"` +} + +// SetMetadata replaces the metadata for the specified blob. // // Some keys may be converted to Camel-Case before sending. All keys // are returned in lower case by GetBlobMetadata. HTTP header names @@ -568,563 +486,147 @@ func (b BlobStorageClient) SetBlobProperties(container, name string, blobHeaders // applications either. // // See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx -func (b BlobStorageClient) SetBlobMetadata(container, name string, metadata map[string]string, extraHeaders map[string]string) error { +func (b *Blob) SetMetadata(options *SetBlobMetadataOptions) error { params := url.Values{"comp": {"metadata"}} - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) - metadata = b.client.protectUserAgent(metadata) - extraHeaders = b.client.protectUserAgent(extraHeaders) - headers := b.client.getStandardHeaders() - for k, v := range metadata { - headers[userDefinedMetadataHeaderPrefix+k] = v - } + headers := b.Container.bsc.client.getStandardHeaders() + headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) - for k, v := range extraHeaders { - headers[k] = v + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth) + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) if err != nil { return err } - defer readAndCloseBody(resp.body) - - return checkRespCode(resp.statusCode, []int{http.StatusOK}) + defer drainRespBody(resp) + return checkRespCode(resp, []int{http.StatusOK}) } -// GetBlobMetadata returns all user-defined metadata for the specified blob. +// GetBlobMetadataOptions includes the options for a get blob metadata operation +type GetBlobMetadataOptions struct { + Timeout uint + Snapshot *time.Time + LeaseID string `header:"x-ms-lease-id"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + RequestID string `header:"x-ms-client-request-id"` +} + +// GetMetadata returns all user-defined metadata for the specified blob. // // All metadata keys will be returned in lower case. (HTTP header // names are case-insensitive.) 
// // See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx -func (b BlobStorageClient) GetBlobMetadata(container, name string) (map[string]string, error) { +func (b *Blob) GetMetadata(options *GetBlobMetadataOptions) error { params := url.Values{"comp": {"metadata"}} - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) - headers := b.client.getStandardHeaders() + headers := b.Container.bsc.client.getStandardHeaders() - resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth) - if err != nil { - return nil, err + if options != nil { + params = addTimeout(params, options.Timeout) + params = addSnapshot(params, options.Snapshot) + headers = mergeHeaders(headers, headersFromStruct(*options)) } - defer readAndCloseBody(resp.body) + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return nil, err - } - - metadata := make(map[string]string) - for k, v := range resp.headers { - // Can't trust CanonicalHeaderKey() to munge case - // reliably. "_" is allowed in identifiers: - // https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx - // https://msdn.microsoft.com/library/aa664670(VS.71).aspx - // http://tools.ietf.org/html/rfc7230#section-3.2 - // ...but "_" is considered invalid by - // CanonicalMIMEHeaderKey in - // https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542 - // so k can be "X-Ms-Meta-Foo" or "x-ms-meta-foo_bar". - k = strings.ToLower(k) - if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) { - continue - } - // metadata["foo"] = content of the last X-Ms-Meta-Foo header - k = k[len(userDefinedMetadataHeaderPrefix):] - metadata[k] = v[len(v)-1] - } - return metadata, nil -} - -// CreateBlockBlob initializes an empty block blob with no blocks. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx -func (b BlobStorageClient) CreateBlockBlob(container, name string) error { - return b.CreateBlockBlobFromReader(container, name, 0, nil, nil) -} - -// CreateBlockBlobFromReader initializes a block blob using data from -// reader. Size must be the number of bytes read from reader. To -// create an empty blob, use size==0 and reader==nil. -// -// The API rejects requests with size > 64 MiB (but this limit is not -// checked by the SDK). To write a larger blob, use CreateBlockBlob, -// PutBlock, and PutBlockList. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx -func (b BlobStorageClient) CreateBlockBlobFromReader(container, name string, size uint64, blob io.Reader, extraHeaders map[string]string) error { - path := fmt.Sprintf("%s/%s", container, name) - uri := b.client.getEndpoint(blobServiceName, path, url.Values{}) - extraHeaders = b.client.protectUserAgent(extraHeaders) - headers := b.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypeBlock) - headers["Content-Length"] = fmt.Sprintf("%d", size) - - for k, v := range extraHeaders { - headers[k] = v - } - - resp, err := b.client.exec(http.MethodPut, uri, headers, blob, b.auth) + resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth) if err != nil { return err } - defer readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} + defer drainRespBody(resp) -// PutBlock saves the given data chunk to the specified block blob with -// given ID. 
-// -// The API rejects chunks larger than 4 MiB (but this limit is not -// checked by the SDK). -// -// See https://msdn.microsoft.com/en-us/library/azure/dd135726.aspx -func (b BlobStorageClient) PutBlock(container, name, blockID string, chunk []byte) error { - return b.PutBlockWithLength(container, name, blockID, uint64(len(chunk)), bytes.NewReader(chunk), nil) -} - -// PutBlockWithLength saves the given data stream of exactly specified size to -// the block blob with given ID. It is an alternative to PutBlocks where data -// comes as stream but the length is known in advance. -// -// The API rejects requests with size > 4 MiB (but this limit is not -// checked by the SDK). -// -// See https://msdn.microsoft.com/en-us/library/azure/dd135726.aspx -func (b BlobStorageClient) PutBlockWithLength(container, name, blockID string, size uint64, blob io.Reader, extraHeaders map[string]string) error { - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{"comp": {"block"}, "blockid": {blockID}}) - extraHeaders = b.client.protectUserAgent(extraHeaders) - headers := b.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypeBlock) - headers["Content-Length"] = fmt.Sprintf("%v", size) - - for k, v := range extraHeaders { - headers[k] = v - } - - resp, err := b.client.exec(http.MethodPut, uri, headers, blob, b.auth) - if err != nil { - return err - } - - defer readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// PutBlockList saves list of blocks to the specified block blob. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179467.aspx -func (b BlobStorageClient) PutBlockList(container, name string, blocks []Block) error { - blockListXML := prepareBlockListRequest(blocks) - - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{"comp": {"blocklist"}}) - headers := b.client.getStandardHeaders() - headers["Content-Length"] = fmt.Sprintf("%v", len(blockListXML)) - - resp, err := b.client.exec(http.MethodPut, uri, headers, strings.NewReader(blockListXML), b.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// GetBlockList retrieves list of blocks in the specified block blob. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx -func (b BlobStorageClient) GetBlockList(container, name string, blockType BlockListType) (BlockListResponse, error) { - params := url.Values{"comp": {"blocklist"}, "blocklisttype": {string(blockType)}} - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) - headers := b.client.getStandardHeaders() - - var out BlockListResponse - resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth) - if err != nil { - return out, err - } - defer resp.body.Close() - - err = xmlUnmarshal(resp.body, &out) - return out, err -} - -// PutPageBlob initializes an empty page blob with specified name and maximum -// size in bytes (size must be aligned to a 512-byte boundary). A page blob must -// be created using this method before writing pages. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx -func (b BlobStorageClient) PutPageBlob(container, name string, size int64, extraHeaders map[string]string) error { - path := fmt.Sprintf("%s/%s", container, name) - uri := b.client.getEndpoint(blobServiceName, path, url.Values{}) - extraHeaders = b.client.protectUserAgent(extraHeaders) - headers := b.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypePage) - headers["x-ms-blob-content-length"] = fmt.Sprintf("%v", size) - - for k, v := range extraHeaders { - headers[k] = v - } - - resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// PutPage writes a range of pages to a page blob or clears the given range. -// In case of 'clear' writes, given chunk is discarded. Ranges must be aligned -// with 512-byte boundaries and chunk must be of size multiplies by 512. -// -// See https://msdn.microsoft.com/en-us/library/ee691975.aspx -func (b BlobStorageClient) PutPage(container, name string, startByte, endByte int64, writeType PageWriteType, chunk []byte, extraHeaders map[string]string) error { - path := fmt.Sprintf("%s/%s", container, name) - uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"page"}}) - extraHeaders = b.client.protectUserAgent(extraHeaders) - headers := b.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypePage) - headers["x-ms-page-write"] = string(writeType) - headers["x-ms-range"] = fmt.Sprintf("bytes=%v-%v", startByte, endByte) - for k, v := range extraHeaders { - headers[k] = v - } - var contentLength int64 - var data io.Reader - if writeType == PageWriteTypeClear { - contentLength = 0 - data = bytes.NewReader([]byte{}) - } else { - contentLength = int64(len(chunk)) - data = bytes.NewReader(chunk) - } - headers["Content-Length"] = fmt.Sprintf("%v", contentLength) - - resp, err := b.client.exec(http.MethodPut, uri, headers, data, b.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// GetPageRanges returns the list of valid page ranges for a page blob. -// -// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx -func (b BlobStorageClient) GetPageRanges(container, name string) (GetPageRangesResponse, error) { - path := fmt.Sprintf("%s/%s", container, name) - uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"pagelist"}}) - headers := b.client.getStandardHeaders() - - var out GetPageRangesResponse - resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth) - if err != nil { - return out, err - } - defer resp.body.Close() - - if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return out, err - } - err = xmlUnmarshal(resp.body, &out) - return out, err -} - -// PutAppendBlob initializes an empty append blob with specified name. An -// append blob must be created using this method before appending blocks. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx -func (b BlobStorageClient) PutAppendBlob(container, name string, extraHeaders map[string]string) error { - path := fmt.Sprintf("%s/%s", container, name) - uri := b.client.getEndpoint(blobServiceName, path, url.Values{}) - extraHeaders = b.client.protectUserAgent(extraHeaders) - headers := b.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypeAppend) - - for k, v := range extraHeaders { - headers[k] = v - } - - resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// AppendBlock appends a block to an append blob. -// -// See https://msdn.microsoft.com/en-us/library/azure/mt427365.aspx -func (b BlobStorageClient) AppendBlock(container, name string, chunk []byte, extraHeaders map[string]string) error { - path := fmt.Sprintf("%s/%s", container, name) - uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"appendblock"}}) - extraHeaders = b.client.protectUserAgent(extraHeaders) - headers := b.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypeAppend) - headers["Content-Length"] = fmt.Sprintf("%v", len(chunk)) - - for k, v := range extraHeaders { - headers[k] = v - } - - resp, err := b.client.exec(http.MethodPut, uri, headers, bytes.NewReader(chunk), b.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// CopyBlob starts a blob copy operation and waits for the operation to -// complete. sourceBlob parameter must be a canonical URL to the blob (can be -// obtained using GetBlobURL method.) There is no SLA on blob copy and therefore -// this helper method works faster on smaller files. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd894037.aspx -func (b BlobStorageClient) CopyBlob(container, name, sourceBlob string) error { - copyID, err := b.StartBlobCopy(container, name, sourceBlob) - if err != nil { - return err - } - - return b.WaitForBlobCopy(container, name, copyID) -} - -// StartBlobCopy starts a blob copy operation. -// sourceBlob parameter must be a canonical URL to the blob (can be -// obtained using GetBlobURL method.) -// -// See https://msdn.microsoft.com/en-us/library/azure/dd894037.aspx -func (b BlobStorageClient) StartBlobCopy(container, name, sourceBlob string) (string, error) { - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) - - headers := b.client.getStandardHeaders() - headers["x-ms-copy-source"] = sourceBlob - - resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth) - if err != nil { - return "", err - } - defer readAndCloseBody(resp.body) - - if err := checkRespCode(resp.statusCode, []int{http.StatusAccepted, http.StatusCreated}); err != nil { - return "", err - } - - copyID := resp.headers.Get("x-ms-copy-id") - if copyID == "" { - return "", errors.New("Got empty copy id header") - } - return copyID, nil -} - -// AbortBlobCopy aborts a BlobCopy which has already been triggered by the StartBlobCopy function. -// copyID is generated from StartBlobCopy function. -// currentLeaseID is required IF the destination blob has an active lease on it. 
-// As defined in https://msdn.microsoft.com/en-us/library/azure/jj159098.aspx -func (b BlobStorageClient) AbortBlobCopy(container, name, copyID, currentLeaseID string, timeout int) error { - params := url.Values{"comp": {"copy"}, "copyid": {copyID}} - if timeout > 0 { - params.Add("timeout", strconv.Itoa(timeout)) - } - - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) - headers := b.client.getStandardHeaders() - headers["x-ms-copy-action"] = "abort" - - if currentLeaseID != "" { - headers[headerLeaseID] = currentLeaseID - } - - resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - - if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { + if err := checkRespCode(resp, []int{http.StatusOK}); err != nil { return err } + b.writeMetadata(resp.Header) return nil } -// WaitForBlobCopy loops until a BlobCopy operation is completed (or fails with error) -func (b BlobStorageClient) WaitForBlobCopy(container, name, copyID string) error { - for { - props, err := b.GetBlobProperties(container, name) - if err != nil { - return err - } - - if props.CopyID != copyID { - return errBlobCopyIDMismatch - } - - switch props.CopyStatus { - case blobCopyStatusSuccess: - return nil - case blobCopyStatusPending: - continue - case blobCopyStatusAborted: - return errBlobCopyAborted - case blobCopyStatusFailed: - return fmt.Errorf("storage: blob copy failed. Id=%s Description=%s", props.CopyID, props.CopyStatusDescription) - default: - return fmt.Errorf("storage: unhandled blob copy status: '%s'", props.CopyStatus) - } - } +func (b *Blob) writeMetadata(h http.Header) { + b.Metadata = BlobMetadata(writeMetadata(h)) } -// DeleteBlob deletes the given blob from the specified container. +// DeleteBlobOptions includes the options for a delete blob operation +type DeleteBlobOptions struct { + Timeout uint + Snapshot *time.Time + LeaseID string `header:"x-ms-lease-id"` + DeleteSnapshots *bool + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + RequestID string `header:"x-ms-client-request-id"` +} + +// Delete deletes the given blob from the specified container. // If the blob does not exists at the time of the Delete Blob operation, it -// returns error. See https://msdn.microsoft.com/en-us/library/azure/dd179413.aspx -func (b BlobStorageClient) DeleteBlob(container, name string, extraHeaders map[string]string) error { - resp, err := b.deleteBlob(container, name, extraHeaders) +// returns error. +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Blob +func (b *Blob) Delete(options *DeleteBlobOptions) error { + resp, err := b.delete(options) if err != nil { return err } - defer readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) + defer drainRespBody(resp) + return checkRespCode(resp, []int{http.StatusAccepted}) } -// DeleteBlobIfExists deletes the given blob from the specified container If the +// DeleteIfExists deletes the given blob from the specified container If the // blob is deleted with this call, returns true. Otherwise returns false. 
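Delete above and DeleteIfExists just below share the new options struct in place of the old extraHeaders map. A minimal sketch of calling the new API, assuming a shared-key client; GetBlobReference is assumed from the surrounding SDK (this patch only references it in comments), and the account, key, container, and blob names are placeholders:

```
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	cli, err := storage.NewBasicClient("myaccount", "bXlrZXk=") // placeholder account and base64 key
	if err != nil {
		panic(err)
	}
	bsc := cli.GetBlobService()
	// GetBlobReference is assumed here; only its use appears in this patch.
	blob := bsc.GetContainerReference("mycontainer").GetBlobReference("myblob")

	includeSnapshots := true // emitted as x-ms-delete-snapshots: include
	deleted, err := blob.DeleteIfExists(&storage.DeleteBlobOptions{
		Timeout:         30,
		DeleteSnapshots: &includeSnapshots,
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("deleted:", deleted)
}
```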
// -// See https://msdn.microsoft.com/en-us/library/azure/dd179413.aspx -func (b BlobStorageClient) DeleteBlobIfExists(container, name string, extraHeaders map[string]string) (bool, error) { - resp, err := b.deleteBlob(container, name, extraHeaders) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Blob +func (b *Blob) DeleteIfExists(options *DeleteBlobOptions) (bool, error) { + resp, err := b.delete(options) if resp != nil { - defer readAndCloseBody(resp.body) - if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { - return resp.statusCode == http.StatusAccepted, nil + defer drainRespBody(resp) + if resp.StatusCode == http.StatusAccepted || resp.StatusCode == http.StatusNotFound { + return resp.StatusCode == http.StatusAccepted, nil } } return false, err } -func (b BlobStorageClient) deleteBlob(container, name string, extraHeaders map[string]string) (*storageResponse, error) { - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) - extraHeaders = b.client.protectUserAgent(extraHeaders) - headers := b.client.getStandardHeaders() - for k, v := range extraHeaders { - headers[k] = v +func (b *Blob) delete(options *DeleteBlobOptions) (*http.Response, error) { + params := url.Values{} + headers := b.Container.bsc.client.getStandardHeaders() + + if options != nil { + params = addTimeout(params, options.Timeout) + params = addSnapshot(params, options.Snapshot) + headers = mergeHeaders(headers, headersFromStruct(*options)) + if options.DeleteSnapshots != nil { + if *options.DeleteSnapshots { + headers["x-ms-delete-snapshots"] = "include" + } else { + headers["x-ms-delete-snapshots"] = "only" + } + } } - - return b.client.exec(http.MethodDelete, uri, headers, nil, b.auth) -} - -// helper method to construct the path to a blob given its container and blob -// name -func pathForBlob(container, name string) string { - return fmt.Sprintf("/%s/%s", container, name) + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + return b.Container.bsc.client.exec(http.MethodDelete, uri, headers, nil, b.Container.bsc.auth) } // helper method to construct the path to either a blob or container func pathForResource(container, name string) string { - if len(name) > 0 { + if name != "" { return fmt.Sprintf("/%s/%s", container, name) } return fmt.Sprintf("/%s", container) } -// GetBlobSASURIWithSignedIPAndProtocol creates an URL to the specified blob which contains the Shared -// Access Signature with specified permissions and expiration time. Also includes signedIPRange and allowed protocols. -// If old API version is used but no signedIP is passed (ie empty string) then this should still work. -// We only populate the signedIP when it non-empty. -// -// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx -func (b BlobStorageClient) GetBlobSASURIWithSignedIPAndProtocol(container, name string, expiry time.Time, permissions string, signedIPRange string, HTTPSOnly bool) (string, error) { - var ( - signedPermissions = permissions - blobURL = b.GetBlobURL(container, name) - ) - canonicalizedResource, err := b.client.buildCanonicalizedResource(blobURL, b.auth) +func (b *Blob) respondCreation(resp *http.Response, bt BlobType) error { + defer drainRespBody(resp) + err := checkRespCode(resp, []int{http.StatusCreated}) if err != nil { - return "", err + return err } - - // "The canonicalizedresouce portion of the string is a canonical path to the signed resource. 
- // It must include the service name (blob, table, queue or file) for version 2015-02-21 or - // later, the storage account name, and the resource name, and must be URL-decoded. - // -- https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx - - // We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component). - canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1) - canonicalizedResource, err = url.QueryUnescape(canonicalizedResource) - if err != nil { - return "", err - } - - signedExpiry := expiry.UTC().Format(time.RFC3339) - - //If blob name is missing, resource is a container - signedResource := "c" - if len(name) > 0 { - signedResource = "b" - } - - protocols := "https,http" - if HTTPSOnly { - protocols = "https" - } - stringToSign, err := blobSASStringToSign(b.client.apiVersion, canonicalizedResource, signedExpiry, signedPermissions, signedIPRange, protocols) - if err != nil { - return "", err - } - - sig := b.client.computeHmac256(stringToSign) - sasParams := url.Values{ - "sv": {b.client.apiVersion}, - "se": {signedExpiry}, - "sr": {signedResource}, - "sp": {signedPermissions}, - "sig": {sig}, - } - - if b.client.apiVersion >= "2015-04-05" { - sasParams.Add("spr", protocols) - if signedIPRange != "" { - sasParams.Add("sip", signedIPRange) - } - } - - sasURL, err := url.Parse(blobURL) - if err != nil { - return "", err - } - sasURL.RawQuery = sasParams.Encode() - return sasURL.String(), nil -} - -// GetBlobSASURI creates an URL to the specified blob which contains the Shared -// Access Signature with specified permissions and expiration time. -// -// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx -func (b BlobStorageClient) GetBlobSASURI(container, name string, expiry time.Time, permissions string) (string, error) { - url, err := b.GetBlobSASURIWithSignedIPAndProtocol(container, name, expiry, permissions, "", false) - return url, err -} - -func blobSASStringToSign(signedVersion, canonicalizedResource, signedExpiry, signedPermissions string, signedIP string, protocols string) (string, error) { - var signedStart, signedIdentifier, rscc, rscd, rsce, rscl, rsct string - - if signedVersion >= "2015-02-21" { - canonicalizedResource = "/blob" + canonicalizedResource - } - - // https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx#Anchor_12 - if signedVersion >= "2015-04-05" { - return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion, rscc, rscd, rsce, rscl, rsct), nil - } - - // reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx - if signedVersion >= "2013-08-15" { - return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion, rscc, rscd, rsce, rscl, rsct), nil - } - - return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15") + b.Properties.BlobType = bt + return nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go new file mode 100644 index 00000000..31894dbf --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go @@ -0,0 +1,174 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the 
"License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "errors" + "fmt" + "net/url" + "strings" + "time" +) + +// OverrideHeaders defines overridable response heaedrs in +// a request using a SAS URI. +// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas +type OverrideHeaders struct { + CacheControl string + ContentDisposition string + ContentEncoding string + ContentLanguage string + ContentType string +} + +// BlobSASOptions are options to construct a blob SAS +// URI. +// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas +type BlobSASOptions struct { + BlobServiceSASPermissions + OverrideHeaders + SASOptions +} + +// BlobServiceSASPermissions includes the available permissions for +// blob service SAS URI. +type BlobServiceSASPermissions struct { + Read bool + Add bool + Create bool + Write bool + Delete bool +} + +func (p BlobServiceSASPermissions) buildString() string { + permissions := "" + if p.Read { + permissions += "r" + } + if p.Add { + permissions += "a" + } + if p.Create { + permissions += "c" + } + if p.Write { + permissions += "w" + } + if p.Delete { + permissions += "d" + } + return permissions +} + +// GetSASURI creates an URL to the blob which contains the Shared +// Access Signature with the specified options. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas +func (b *Blob) GetSASURI(options BlobSASOptions) (string, error) { + uri := b.GetURL() + signedResource := "b" + canonicalizedResource, err := b.Container.bsc.client.buildCanonicalizedResource(uri, b.Container.bsc.auth, true) + if err != nil { + return "", err + } + + permissions := options.BlobServiceSASPermissions.buildString() + return b.Container.bsc.client.blobAndFileSASURI(options.SASOptions, uri, permissions, canonicalizedResource, signedResource, options.OverrideHeaders) +} + +func (c *Client) blobAndFileSASURI(options SASOptions, uri, permissions, canonicalizedResource, signedResource string, headers OverrideHeaders) (string, error) { + start := "" + if options.Start != (time.Time{}) { + start = options.Start.UTC().Format(time.RFC3339) + } + + expiry := options.Expiry.UTC().Format(time.RFC3339) + + // We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component). 
+ canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1)
+ canonicalizedResource, err := url.QueryUnescape(canonicalizedResource)
+ if err != nil {
+ return "", err
+ }
+
+ protocols := ""
+ if options.UseHTTPS {
+ protocols = "https"
+ }
+ stringToSign, err := blobSASStringToSign(permissions, start, expiry, canonicalizedResource, options.Identifier, options.IP, protocols, c.apiVersion, headers)
+ if err != nil {
+ return "", err
+ }
+
+ sig := c.computeHmac256(stringToSign)
+ sasParams := url.Values{
+ "sv": {c.apiVersion},
+ "se": {expiry},
+ "sr": {signedResource},
+ "sp": {permissions},
+ "sig": {sig},
+ }
+
+ if start != "" {
+ sasParams.Add("st", start)
+ }
+
+ if c.apiVersion >= "2015-04-05" {
+ if protocols != "" {
+ sasParams.Add("spr", protocols)
+ }
+ if options.IP != "" {
+ sasParams.Add("sip", options.IP)
+ }
+ }
+
+ // Add override response headers
+ addQueryParameter(sasParams, "rscc", headers.CacheControl)
+ addQueryParameter(sasParams, "rscd", headers.ContentDisposition)
+ addQueryParameter(sasParams, "rsce", headers.ContentEncoding)
+ addQueryParameter(sasParams, "rscl", headers.ContentLanguage)
+ addQueryParameter(sasParams, "rsct", headers.ContentType)
+
+ sasURL, err := url.Parse(uri)
+ if err != nil {
+ return "", err
+ }
+ sasURL.RawQuery = sasParams.Encode()
+ return sasURL.String(), nil
+}
+
+func blobSASStringToSign(signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion string, headers OverrideHeaders) (string, error) {
+ rscc := headers.CacheControl
+ rscd := headers.ContentDisposition
+ rsce := headers.ContentEncoding
+ rscl := headers.ContentLanguage
+ rsct := headers.ContentType
+
+ if signedVersion >= "2015-02-21" {
+ canonicalizedResource = "/blob" + canonicalizedResource
+ }
+
+ // https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx#Anchor_12
+ if signedVersion >= "2015-04-05" {
+ return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion, rscc, rscd, rsce, rscl, rsct), nil
+ }
+
+ // reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
+ if signedVersion >= "2013-08-15" {
+ return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion, rscc, rscd, rsce, rscl, rsct), nil
+ }
+
+ return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15")
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go
index e5911ac8..f6a9da28 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go
@@ -1,9 +1,26 @@
 package storage
 
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + import ( + "encoding/xml" "fmt" "net/http" "net/url" + "strconv" + "strings" ) // BlobStorageClient contains operations for Microsoft Azure Blob Storage @@ -38,13 +55,28 @@ type ListContainersParameters struct { } // GetContainerReference returns a Container object for the specified container name. -func (b BlobStorageClient) GetContainerReference(name string) Container { - return Container{ - bsc: &b, +func (b *BlobStorageClient) GetContainerReference(name string) *Container { + return &Container{ + bsc: b, Name: name, } } +// GetContainerReferenceFromSASURI returns a Container object for the specified +// container SASURI +func GetContainerReferenceFromSASURI(sasuri url.URL) (*Container, error) { + path := strings.Split(sasuri.Path, "/") + if len(path) <= 1 { + return nil, fmt.Errorf("could not find a container in URI: %s", sasuri.String()) + } + cli := newSASClient().GetBlobService() + return &Container{ + bsc: &cli, + Name: path[1], + sasuri: sasuri, + }, nil +} + // ListContainers returns the list of containers in a storage account along with // pagination token and other response details. // @@ -54,18 +86,53 @@ func (b BlobStorageClient) ListContainers(params ListContainersParameters) (*Con uri := b.client.getEndpoint(blobServiceName, "", q) headers := b.client.getStandardHeaders() - var out ContainerListResponse + type ContainerAlias struct { + bsc *BlobStorageClient + Name string `xml:"Name"` + Properties ContainerProperties `xml:"Properties"` + Metadata BlobMetadata + sasuri url.URL + } + type ContainerListResponseAlias struct { + XMLName xml.Name `xml:"EnumerationResults"` + Xmlns string `xml:"xmlns,attr"` + Prefix string `xml:"Prefix"` + Marker string `xml:"Marker"` + NextMarker string `xml:"NextMarker"` + MaxResults int64 `xml:"MaxResults"` + Containers []ContainerAlias `xml:"Containers>Container"` + } + + var outAlias ContainerListResponseAlias resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth) if err != nil { return nil, err } - defer resp.body.Close() - err = xmlUnmarshal(resp.body, &out) - - // assign our client to the newly created Container objects - for i := range out.Containers { - out.Containers[i].bsc = &b + defer resp.Body.Close() + err = xmlUnmarshal(resp.Body, &outAlias) + if err != nil { + return nil, err } + + out := ContainerListResponse{ + XMLName: outAlias.XMLName, + Xmlns: outAlias.Xmlns, + Prefix: outAlias.Prefix, + Marker: outAlias.Marker, + NextMarker: outAlias.NextMarker, + MaxResults: outAlias.MaxResults, + Containers: make([]Container, len(outAlias.Containers)), + } + for i, cnt := range outAlias.Containers { + out.Containers[i] = Container{ + bsc: &b, + Name: cnt.Name, + Properties: cnt.Properties, + Metadata: map[string]string(cnt.Metadata), + sasuri: cnt.sasuri, + } + } + return &out, err } @@ -82,11 +149,34 @@ func (p ListContainersParameters) getParameters() url.Values { out.Set("include", p.Include) } if p.MaxResults != 0 { - out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults)) + out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10)) } if p.Timeout != 0 { - out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) + out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10)) } return out } + +func writeMetadata(h http.Header) map[string]string { + metadata := make(map[string]string) + for k, v := range h { + // Can't trust CanonicalHeaderKey() to munge case + // reliably. 
"_" is allowed in identifiers: + // https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx + // https://msdn.microsoft.com/library/aa664670(VS.71).aspx + // http://tools.ietf.org/html/rfc7230#section-3.2 + // ...but "_" is considered invalid by + // CanonicalMIMEHeaderKey in + // https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542 + // so k can be "X-Ms-Meta-Lol" or "x-ms-meta-lol_rofl". + k = strings.ToLower(k) + if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) { + continue + } + // metadata["lol"] = content of the last X-Ms-Meta-Lol header + k = k[len(userDefinedMetadataHeaderPrefix):] + metadata[k] = v[len(v)-1] + } + return metadata +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go new file mode 100644 index 00000000..c9c62d79 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go @@ -0,0 +1,270 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "encoding/xml" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + "time" +) + +// BlockListType is used to filter out types of blocks in a Get Blocks List call +// for a block blob. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx for all +// block types. +type BlockListType string + +// Filters for listing blocks in block blobs +const ( + BlockListTypeAll BlockListType = "all" + BlockListTypeCommitted BlockListType = "committed" + BlockListTypeUncommitted BlockListType = "uncommitted" +) + +// Maximum sizes (per REST API) for various concepts +const ( + MaxBlobBlockSize = 100 * 1024 * 1024 + MaxBlobPageSize = 4 * 1024 * 1024 +) + +// BlockStatus defines states a block for a block blob can +// be in. +type BlockStatus string + +// List of statuses that can be used to refer to a block in a block list +const ( + BlockStatusUncommitted BlockStatus = "Uncommitted" + BlockStatusCommitted BlockStatus = "Committed" + BlockStatusLatest BlockStatus = "Latest" +) + +// Block is used to create Block entities for Put Block List +// call. +type Block struct { + ID string + Status BlockStatus +} + +// BlockListResponse contains the response fields from Get Block List call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx +type BlockListResponse struct { + XMLName xml.Name `xml:"BlockList"` + CommittedBlocks []BlockResponse `xml:"CommittedBlocks>Block"` + UncommittedBlocks []BlockResponse `xml:"UncommittedBlocks>Block"` +} + +// BlockResponse contains the block information returned +// in the GetBlockListCall. +type BlockResponse struct { + Name string `xml:"Name"` + Size int64 `xml:"Size"` +} + +// CreateBlockBlob initializes an empty block blob with no blocks. +// +// See CreateBlockBlobFromReader for more info on creating blobs. 
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
+func (b *Blob) CreateBlockBlob(options *PutBlobOptions) error {
+ return b.CreateBlockBlobFromReader(nil, options)
+}
+
+// CreateBlockBlobFromReader initializes a block blob using data from
+// reader. Size must be the number of bytes read from reader. To
+// create an empty blob, use size==0 and reader==nil.
+//
+// Any headers set in blob.Properties or metadata in blob.Metadata
+// will be set on the blob.
+//
+// The API rejects requests with size > 256 MiB (but this limit is not
+// checked by the SDK). To write a larger blob, use CreateBlockBlob,
+// PutBlock, and PutBlockList.
+//
+// To create a blob from scratch, call container.GetBlobReference() to
+// get an empty blob, fill in blob.Properties and blob.Metadata as
+// appropriate then call this method.
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
+func (b *Blob) CreateBlockBlobFromReader(blob io.Reader, options *PutBlobOptions) error {
+ params := url.Values{}
+ headers := b.Container.bsc.client.getStandardHeaders()
+ headers["x-ms-blob-type"] = string(BlobTypeBlock)
+
+ headers["Content-Length"] = "0"
+ var n int64
+ var err error
+ if blob != nil {
+ type lener interface {
+ Len() int
+ }
+ // TODO(rjeczalik): handle io.ReadSeeker, in case blob is *os.File etc.
+ if l, ok := blob.(lener); ok {
+ n = int64(l.Len())
+ } else {
+ var buf bytes.Buffer
+ n, err = io.Copy(&buf, blob)
+ if err != nil {
+ return err
+ }
+ blob = &buf
+ }
+
+ headers["Content-Length"] = strconv.FormatInt(n, 10)
+ }
+ b.Properties.ContentLength = n
+
+ headers = mergeHeaders(headers, headersFromStruct(b.Properties))
+ headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)
+
+ if options != nil {
+ params = addTimeout(params, options.Timeout)
+ headers = mergeHeaders(headers, headersFromStruct(*options))
+ }
+ uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
+
+ resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, blob, b.Container.bsc.auth)
+ if err != nil {
+ return err
+ }
+ return b.respondCreation(resp, BlobTypeBlock)
+}
+
+// PutBlockOptions includes the options for a put block operation
+type PutBlockOptions struct {
+ Timeout uint
+ LeaseID string `header:"x-ms-lease-id"`
+ ContentMD5 string `header:"Content-MD5"`
+ RequestID string `header:"x-ms-client-request-id"`
+}
+
+// PutBlock saves the given data chunk to the specified block blob with
+// given ID.
+//
+// The API rejects chunks larger than 100 MiB (but this limit is not
+// checked by the SDK).
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block
+func (b *Blob) PutBlock(blockID string, chunk []byte, options *PutBlockOptions) error {
+ return b.PutBlockWithLength(blockID, uint64(len(chunk)), bytes.NewReader(chunk), options)
+}
+
+// PutBlockWithLength saves the given data stream of exactly specified size to
+// the block blob with given ID. It is an alternative to PutBlock where the
+// data comes as a stream but the length is known in advance.
+//
+// The API rejects requests with size > 100 MiB (but this limit is not
+// checked by the SDK). 
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block +func (b *Blob) PutBlockWithLength(blockID string, size uint64, blob io.Reader, options *PutBlockOptions) error { + query := url.Values{ + "comp": {"block"}, + "blockid": {blockID}, + } + headers := b.Container.bsc.client.getStandardHeaders() + headers["Content-Length"] = fmt.Sprintf("%v", size) + + if options != nil { + query = addTimeout(query, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), query) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, blob, b.Container.bsc.auth) + if err != nil { + return err + } + return b.respondCreation(resp, BlobTypeBlock) +} + +// PutBlockListOptions includes the options for a put block list operation +type PutBlockListOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + RequestID string `header:"x-ms-client-request-id"` +} + +// PutBlockList saves list of blocks to the specified block blob. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block-List +func (b *Blob) PutBlockList(blocks []Block, options *PutBlockListOptions) error { + params := url.Values{"comp": {"blocklist"}} + blockListXML := prepareBlockListRequest(blocks) + headers := b.Container.bsc.client.getStandardHeaders() + headers["Content-Length"] = fmt.Sprintf("%v", len(blockListXML)) + headers = mergeHeaders(headers, headersFromStruct(b.Properties)) + headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, strings.NewReader(blockListXML), b.Container.bsc.auth) + if err != nil { + return err + } + defer drainRespBody(resp) + return checkRespCode(resp, []int{http.StatusCreated}) +} + +// GetBlockListOptions includes the options for a get block list operation +type GetBlockListOptions struct { + Timeout uint + Snapshot *time.Time + LeaseID string `header:"x-ms-lease-id"` + RequestID string `header:"x-ms-client-request-id"` +} + +// GetBlockList retrieves list of blocks in the specified block blob. 
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Block-List +func (b *Blob) GetBlockList(blockType BlockListType, options *GetBlockListOptions) (BlockListResponse, error) { + params := url.Values{ + "comp": {"blocklist"}, + "blocklisttype": {string(blockType)}, + } + headers := b.Container.bsc.client.getStandardHeaders() + + if options != nil { + params = addTimeout(params, options.Timeout) + params = addSnapshot(params, options.Snapshot) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + var out BlockListResponse + resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go index e42082d4..2930824b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go @@ -1,8 +1,22 @@ // Package storage provides clients for Microsoft Azure Storage Services. package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( - "bytes" + "bufio" "encoding/base64" "encoding/json" "encoding/xml" @@ -10,12 +24,18 @@ import ( "fmt" "io" "io/ioutil" + "mime" + "mime/multipart" "net/http" "net/url" + "regexp" "runtime" "strconv" "strings" + "time" + "github.com/Azure/azure-sdk-for-go/version" + "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" ) @@ -26,9 +46,11 @@ const ( // DefaultAPIVersion is the Azure Storage API version string used when a // basic client is created. 
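Before moving on to the client plumbing: the blockblob.go helpers above (CreateBlockBlob, PutBlock, PutBlockList) compose into the multi-block upload that the CreateBlockBlobFromReader comment recommends for payloads over the single-request limit. A rough sketch of that loop, assuming blob is a *storage.Blob as in the earlier example; the rule that all block IDs within a blob must be base64 strings of equal decoded length comes from the REST API, not from this SDK:

```
package blobutil

import (
	"encoding/base64"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/storage"
)

// uploadInBlocks stages data in fixed-size chunks with PutBlock and then
// commits them in order with PutBlockList. A sketch only: blockSize must be
// positive and no larger than MaxBlobBlockSize.
func uploadInBlocks(blob *storage.Blob, data []byte, blockSize int) error {
	if err := blob.CreateBlockBlob(nil); err != nil { // start from an empty block blob
		return err
	}
	var blocks []storage.Block
	for i := 0; i*blockSize < len(data); i++ {
		end := (i + 1) * blockSize
		if end > len(data) {
			end = len(data)
		}
		// All block IDs in a blob must decode to the same length, so
		// base64-encode a fixed-width counter.
		id := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%08d", i)))
		if err := blob.PutBlock(id, data[i*blockSize:end], nil); err != nil {
			return err
		}
		blocks = append(blocks, storage.Block{ID: id, Status: storage.BlockStatusUncommitted})
	}
	return blob.PutBlockList(blocks, nil) // commit the staged blocks in order
}
```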
- DefaultAPIVersion = "2015-04-05" + DefaultAPIVersion = "2016-05-31" - defaultUseHTTPS = true + defaultUseHTTPS = true + defaultRetryAttempts = 5 + defaultRetryDuration = time.Second * 5 // StorageEmulatorAccountName is the fixed storage account used by Azure Storage Emulator StorageEmulatorAccountName = "devstoreaccount1" @@ -46,15 +68,79 @@ const ( storageEmulatorQueue = "127.0.0.1:10001" userAgentHeader = "User-Agent" + + userDefinedMetadataHeaderPrefix = "x-ms-meta-" + + connectionStringAccountName = "accountname" + connectionStringAccountKey = "accountkey" + connectionStringEndpointSuffix = "endpointsuffix" + connectionStringEndpointProtocol = "defaultendpointsprotocol" + + connectionStringBlobEndpoint = "blobendpoint" + connectionStringFileEndpoint = "fileendpoint" + connectionStringQueueEndpoint = "queueendpoint" + connectionStringTableEndpoint = "tableendpoint" + connectionStringSAS = "sharedaccesssignature" ) +var ( + validStorageAccount = regexp.MustCompile("^[0-9a-z]{3,24}$") + defaultValidStatusCodes = []int{ + http.StatusRequestTimeout, // 408 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + } +) + +// Sender sends a request +type Sender interface { + Send(*Client, *http.Request) (*http.Response, error) +} + +// DefaultSender is the default sender for the client. It implements +// an automatic retry strategy. +type DefaultSender struct { + RetryAttempts int + RetryDuration time.Duration + ValidStatusCodes []int + attempts int // used for testing +} + +// Send is the default retry strategy in the client +func (ds *DefaultSender) Send(c *Client, req *http.Request) (resp *http.Response, err error) { + rr := autorest.NewRetriableRequest(req) + for attempts := 0; attempts < ds.RetryAttempts; attempts++ { + err = rr.Prepare() + if err != nil { + return resp, err + } + resp, err = c.HTTPClient.Do(rr.Request()) + if err != nil || !autorest.ResponseHasStatusCode(resp, ds.ValidStatusCodes...) { + return resp, err + } + drainRespBody(resp) + autorest.DelayForBackoff(ds.RetryDuration, attempts, req.Cancel) + ds.attempts = attempts + } + ds.attempts++ + return resp, err +} + // Client is the object that needs to be constructed to perform // operations on the storage account. type Client struct { // HTTPClient is the http.Client used to initiate API - // requests. If it is nil, http.DefaultClient is used. + // requests. http.DefaultClient is used when creating a + // client. HTTPClient *http.Client + // Sender is an interface that sends the request. Clients are + // created with a DefaultSender. The DefaultSender has an + // automatic retry strategy built in. The Sender can be customized. 
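The Sender field declared just below is exported and every client now ships with a DefaultSender, so the retry policy can be tuned without reimplementing the interface. A minimal sketch, assuming a shared-key client with placeholder credentials:

```
package main

import (
	"net/http"
	"time"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	cli, err := storage.NewBasicClient("myaccount", "bXlrZXk=") // placeholder account and base64 key
	if err != nil {
		panic(err)
	}
	// Retry up to three times, only on 503 responses, backing off from a
	// two-second base delay instead of the five-second default.
	cli.Sender = &storage.DefaultSender{
		RetryAttempts:    3,
		RetryDuration:    2 * time.Second,
		ValidStatusCodes: []int{http.StatusServiceUnavailable},
	}
	_ = cli.GetBlobService() // subsequent requests go through the custom sender
}
```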
+ Sender Sender + accountName string accountKey []byte useHTTPS bool @@ -62,17 +148,13 @@ type Client struct { baseURL string apiVersion string userAgent string -} - -type storageResponse struct { - statusCode int - headers http.Header - body io.ReadCloser + sasClient bool + accountSASToken url.Values } type odataResponse struct { - storageResponse - odata odataErrorMessage + resp *http.Response + odata odataErrorWrapper } // AzureStorageServiceError contains fields of the error response from @@ -85,22 +167,25 @@ type AzureStorageServiceError struct { QueryParameterName string `xml:"QueryParameterName"` QueryParameterValue string `xml:"QueryParameterValue"` Reason string `xml:"Reason"` + Lang string StatusCode int RequestID string + Date string + APIVersion string } -type odataErrorMessageMessage struct { +type odataErrorMessage struct { Lang string `json:"lang"` Value string `json:"value"` } -type odataErrorMessageInternal struct { - Code string `json:"code"` - Message odataErrorMessageMessage `json:"message"` +type odataError struct { + Code string `json:"code"` + Message odataErrorMessage `json:"message"` } -type odataErrorMessage struct { - Err odataErrorMessageInternal `json:"odata.error"` +type odataErrorWrapper struct { + Err odataError `json:"odata.error"` } // UnexpectedStatusCodeError is returned when a storage service responds with neither an error @@ -108,6 +193,7 @@ type odataErrorMessage struct { type UnexpectedStatusCodeError struct { allowed []int got int + inner error } func (e UnexpectedStatusCodeError) Error() string { @@ -118,7 +204,7 @@ func (e UnexpectedStatusCodeError) Error() string { for _, v := range e.allowed { expected = append(expected, s(v)) } - return fmt.Sprintf("storage: status code from service response is %s; was expecting %s", got, strings.Join(expected, " or ")) + return fmt.Sprintf("storage: status code from service response is %s; was expecting %s. Inner error: %+v", got, strings.Join(expected, " or "), e.inner) } // Got is the actual status code returned by Azure. @@ -126,6 +212,60 @@ func (e UnexpectedStatusCodeError) Got() int { return e.got } +// Inner returns any inner error info. +func (e UnexpectedStatusCodeError) Inner() error { + return e.inner +} + +// NewClientFromConnectionString creates a Client from the connection string. +func NewClientFromConnectionString(input string) (Client, error) { + // build a map of connection string key/value pairs + parts := map[string]string{} + for _, pair := range strings.Split(input, ";") { + if pair == "" { + continue + } + + equalDex := strings.IndexByte(pair, '=') + if equalDex <= 0 { + return Client{}, fmt.Errorf("Invalid connection segment %q", pair) + } + + value := strings.TrimSpace(pair[equalDex+1:]) + key := strings.TrimSpace(strings.ToLower(pair[:equalDex])) + parts[key] = value + } + + // TODO: validate parameter sets? 
+
+ if parts[connectionStringAccountName] == StorageEmulatorAccountName {
+ return NewEmulatorClient()
+ }
+
+ if parts[connectionStringSAS] != "" {
+ endpoint := ""
+ if parts[connectionStringBlobEndpoint] != "" {
+ endpoint = parts[connectionStringBlobEndpoint]
+ } else if parts[connectionStringFileEndpoint] != "" {
+ endpoint = parts[connectionStringFileEndpoint]
+ } else if parts[connectionStringQueueEndpoint] != "" {
+ endpoint = parts[connectionStringQueueEndpoint]
+ } else {
+ endpoint = parts[connectionStringTableEndpoint]
+ }
+
+ return NewAccountSASClientFromEndpointToken(endpoint, parts[connectionStringSAS])
+ }
+
+ useHTTPS := defaultUseHTTPS
+ if parts[connectionStringEndpointProtocol] != "" {
+ useHTTPS = parts[connectionStringEndpointProtocol] == "https"
+ }
+
+ return NewClient(parts[connectionStringAccountName], parts[connectionStringAccountKey],
+ parts[connectionStringEndpointSuffix], DefaultAPIVersion, useHTTPS)
+}
+
 // NewBasicClient constructs a Client with given storage service name and
 // key.
 func NewBasicClient(accountName, accountKey string) (Client, error) {
@@ -153,13 +293,13 @@ func NewEmulatorClient() (Client, error) {
 // NewClient constructs a Client. This should be used if the caller wants
 // to specify whether to use HTTPS, a specific REST API version or a custom
 // storage endpoint than Azure Public Cloud.
-func NewClient(accountName, accountKey, blobServiceBaseURL, apiVersion string, useHTTPS bool) (Client, error) {
+func NewClient(accountName, accountKey, serviceBaseURL, apiVersion string, useHTTPS bool) (Client, error) {
 var c Client
- if accountName == "" {
- return c, fmt.Errorf("azure: account name required")
+ if !IsValidStorageAccount(accountName) {
+ return c, fmt.Errorf("azure: account name is not valid: it must be between 3 and 24 characters, and only may contain numbers and lowercase letters: %v", accountName)
 } else if accountKey == "" {
 return c, fmt.Errorf("azure: account key required")
- } else if blobServiceBaseURL == "" {
+ } else if serviceBaseURL == "" {
 return c, fmt.Errorf("azure: base storage service url required")
 }
 
@@ -169,23 +309,114 @@ func NewClient(accountName, accountKey, blobServiceBaseURL, apiVersion string, u
 }
 
 c = Client{
+ HTTPClient: http.DefaultClient,
 accountName: accountName,
 accountKey: key,
 useHTTPS: useHTTPS,
- baseURL: blobServiceBaseURL,
+ baseURL: serviceBaseURL,
 apiVersion: apiVersion,
+ sasClient: false,
 UseSharedKeyLite: false,
+ Sender: &DefaultSender{
+ RetryAttempts: defaultRetryAttempts,
+ ValidStatusCodes: defaultValidStatusCodes,
+ RetryDuration: defaultRetryDuration,
+ },
 }
 c.userAgent = c.getDefaultUserAgent()
 return c, nil
 }
 
+// IsValidStorageAccount checks if the storage account name is valid.
+// See https://docs.microsoft.com/en-us/azure/storage/storage-create-storage-account
+func IsValidStorageAccount(account string) bool {
+ return validStorageAccount.MatchString(account)
+}
+
+// NewAccountSASClient constructs a client that uses accountSAS authorization
+// for its operations.
+func NewAccountSASClient(account string, token url.Values, env azure.Environment) Client {
+ c := newSASClient()
+ c.accountSASToken = token
+ c.accountName = account
+ c.baseURL = env.StorageEndpointSuffix
+
+ // Get API version and protocol from token
+ c.apiVersion = token.Get("sv")
+ c.useHTTPS = token.Get("spr") == "https"
+ return c
+}
+
+// NewAccountSASClientFromEndpointToken constructs a client that uses accountSAS authorization
+// for its operations using the specified endpoint and SAS token. 
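NewClientFromConnectionString above dispatches on which keys are present in the semicolon-delimited string; key matching is case-insensitive thanks to the ToLower in the parser. A short sketch of the two main forms it accepts (every value is a placeholder):

```
package main

import "github.com/Azure/azure-sdk-for-go/storage"

func main() {
	// Shared-key form: routed through NewClient with the parsed settings.
	c1, err := storage.NewClientFromConnectionString(
		"DefaultEndpointsProtocol=https;AccountName=myaccount;AccountKey=bXlrZXk=;EndpointSuffix=core.windows.net")
	if err != nil {
		panic(err)
	}

	// SAS form: a service endpoint plus SharedAccessSignature yields an
	// account SAS client (the sig value here is a placeholder).
	c2, err := storage.NewClientFromConnectionString(
		"BlobEndpoint=https://myaccount.blob.core.windows.net;SharedAccessSignature=sv=2016-05-31&ss=b&srt=sco&sp=r&se=2018-01-01T00:00:00Z&sig=placeholder")
	if err != nil {
		panic(err)
	}
	_, _ = c1, c2
}
```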
+func NewAccountSASClientFromEndpointToken(endpoint string, sasToken string) (Client, error) {
+ u, err := url.Parse(endpoint)
+ if err != nil {
+ return Client{}, err
+ }
+
+ token, err := url.ParseQuery(sasToken)
+ if err != nil {
+ return Client{}, err
+ }
+
+ // the host name will look something like this
+ // - foo.blob.core.windows.net
+ // "foo" is the account name
+ // "core.windows.net" is the baseURL
+
+ // find the first dot to get account name
+ i1 := strings.IndexByte(u.Host, '.')
+ if i1 < 0 {
+ return Client{}, fmt.Errorf("failed to find '.' in %s", u.Host)
+ }
+
+ // now find the second dot to get the base URL
+ i2 := strings.IndexByte(u.Host[i1+1:], '.')
+ if i2 < 0 {
+ return Client{}, fmt.Errorf("failed to find '.' in %s", u.Host[i1+1:])
+ }
+
+ c := newSASClient()
+ c.accountSASToken = token
+ c.accountName = u.Host[:i1]
+ c.baseURL = u.Host[i1+i2+2:]
+
+ // Get API version and protocol from token
+ c.apiVersion = token.Get("sv")
+ c.useHTTPS = token.Get("spr") == "https"
+ return c, nil
+}
+
+func newSASClient() Client {
+ c := Client{
+ HTTPClient: http.DefaultClient,
+ apiVersion: DefaultAPIVersion,
+ sasClient: true,
+ Sender: &DefaultSender{
+ RetryAttempts: defaultRetryAttempts,
+ ValidStatusCodes: defaultValidStatusCodes,
+ RetryDuration: defaultRetryDuration,
+ },
+ }
+ c.userAgent = c.getDefaultUserAgent()
+ return c
+}
+
+func (c Client) isServiceSASClient() bool {
+ return c.sasClient && c.accountSASToken == nil
+}
+
+func (c Client) isAccountSASClient() bool {
+ return c.sasClient && c.accountSASToken != nil
+}
+
 func (c Client) getDefaultUserAgent() string {
- return fmt.Sprintf("Go/%s (%s-%s) Azure-SDK-For-Go/%s storage-dataplane/%s",
+ return fmt.Sprintf("Go/%s (%s-%s) azure-storage-go/%s api-version/%s",
 runtime.Version(),
 runtime.GOARCH,
 runtime.GOOS,
- sdkVersion,
+ version.Number,
 c.apiVersion,
 )
 }
@@ -210,7 +441,7 @@ func (c *Client) protectUserAgent(extraheaders map[string]string) map[string]str
 return extraheaders
 }
 
-func (c Client) getBaseURL(service string) string {
+func (c Client) getBaseURL(service string) *url.URL {
 scheme := "http"
 if c.useHTTPS {
 scheme = "https"
@@ -229,18 +460,14 @@ func (c Client) getBaseURL(service string) string {
 host = fmt.Sprintf("%s.%s.%s", c.accountName, service, c.baseURL)
 }
 
- u := &url.URL{
+ return &url.URL{
 Scheme: scheme,
- Host: host}
- return u.String()
+ Host: host,
+ }
 }
 
 func (c Client) getEndpoint(service, path string, params url.Values) string {
- u, err := url.Parse(c.getBaseURL(service))
- if err != nil {
- // really should not be happening
- panic(err)
- }
+ u := c.getBaseURL(service)
 
 // API doesn't accept path segments not starting with '/'
 if !strings.HasPrefix(path, "/") {
@@ -256,6 +483,160 @@ func (c Client) getEndpoint(service, path string, params url.Values) string {
 return u.String()
 }
 
+// AccountSASTokenOptions includes options for constructing
+// an account SAS token.
+// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas
+type AccountSASTokenOptions struct {
+ APIVersion string
+ Services Services
+ ResourceTypes ResourceTypes
+ Permissions Permissions
+ Start time.Time
+ Expiry time.Time
+ IP string
+ UseHTTPS bool
+}
+
+// Services specify services accessible with an account SAS.
+type Services struct {
+ Blob bool
+ Queue bool
+ Table bool
+ File bool
+}
+
+// ResourceTypes specify the resources accessible with an
+// account SAS. 
+type ResourceTypes struct { + Service bool + Container bool + Object bool +} + +// Permissions specifies permissions for an accountSAS. +type Permissions struct { + Read bool + Write bool + Delete bool + List bool + Add bool + Create bool + Update bool + Process bool +} + +// GetAccountSASToken creates an account SAS token +// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas +func (c Client) GetAccountSASToken(options AccountSASTokenOptions) (url.Values, error) { + if options.APIVersion == "" { + options.APIVersion = c.apiVersion + } + + if options.APIVersion < "2015-04-05" { + return url.Values{}, fmt.Errorf("account SAS does not support API versions prior to 2015-04-05. API version : %s", options.APIVersion) + } + + // build services string + services := "" + if options.Services.Blob { + services += "b" + } + if options.Services.Queue { + services += "q" + } + if options.Services.Table { + services += "t" + } + if options.Services.File { + services += "f" + } + + // build resources string + resources := "" + if options.ResourceTypes.Service { + resources += "s" + } + if options.ResourceTypes.Container { + resources += "c" + } + if options.ResourceTypes.Object { + resources += "o" + } + + // build permissions string + permissions := "" + if options.Permissions.Read { + permissions += "r" + } + if options.Permissions.Write { + permissions += "w" + } + if options.Permissions.Delete { + permissions += "d" + } + if options.Permissions.List { + permissions += "l" + } + if options.Permissions.Add { + permissions += "a" + } + if options.Permissions.Create { + permissions += "c" + } + if options.Permissions.Update { + permissions += "u" + } + if options.Permissions.Process { + permissions += "p" + } + + // build start time, if exists + start := "" + if options.Start != (time.Time{}) { + start = options.Start.UTC().Format(time.RFC3339) + } + + // build expiry time + expiry := options.Expiry.UTC().Format(time.RFC3339) + + protocol := "https,http" + if options.UseHTTPS { + protocol = "https" + } + + stringToSign := strings.Join([]string{ + c.accountName, + permissions, + services, + resources, + start, + expiry, + options.IP, + protocol, + options.APIVersion, + "", + }, "\n") + signature := c.computeHmac256(stringToSign) + + sasParams := url.Values{ + "sv": {options.APIVersion}, + "ss": {services}, + "srt": {resources}, + "sp": {permissions}, + "se": {expiry}, + "spr": {protocol}, + "sig": {signature}, + } + if start != "" { + sasParams.Add("st", start) + } + if options.IP != "" { + sasParams.Add("sip", options.IP) + } + + return sasParams, nil +} + // GetBlobService returns a BlobStorageClient which can operate on the blob // service of the storage account. func (c Client) GetBlobService() BlobStorageClient { @@ -320,7 +701,7 @@ func (c Client) getStandardHeaders() map[string]string { } } -func (c Client) exec(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*storageResponse, error) { +func (c Client) exec(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*http.Response, error) { headers, err := c.addAuthorizationHeader(verb, url, headers, auth) if err != nil { return nil, err @@ -331,66 +712,43 @@ func (c Client) exec(verb, url string, headers map[string]string, body io.Reader return nil, errors.New("azure/storage: error creating request: " + err.Error()) } - if clstr, ok := headers["Content-Length"]; ok { - // content length header is being signed, but completely ignored by golang. 
- // instead we have to use the ContentLength property on the request struct - // (see https://golang.org/src/net/http/request.go?s=18140:18370#L536 and - // https://golang.org/src/net/http/transfer.go?s=1739:2467#L49) - req.ContentLength, err = strconv.ParseInt(clstr, 10, 64) - if err != nil { - return nil, err + // http.NewRequest() will automatically set req.ContentLength for a handful of types + // otherwise we will handle here. + if req.ContentLength < 1 { + if clstr, ok := headers["Content-Length"]; ok { + if cl, err := strconv.ParseInt(clstr, 10, 64); err == nil { + req.ContentLength = cl + } } } + for k, v := range headers { - req.Header.Add(k, v) + req.Header[k] = append(req.Header[k], v) // Must bypass case munging present in `Add` by using map functions directly. See https://github.com/Azure/azure-sdk-for-go/issues/645 } - httpClient := c.HTTPClient - if httpClient == nil { - httpClient = http.DefaultClient + if c.isAccountSASClient() { + // append the SAS token to the query params + v := req.URL.Query() + v = mergeParams(v, c.accountSASToken) + req.URL.RawQuery = v.Encode() } - resp, err := httpClient.Do(req) + + resp, err := c.Sender.Send(&c, req) if err != nil { return nil, err } - statusCode := resp.StatusCode - if statusCode >= 400 && statusCode <= 505 { - var respBody []byte - respBody, err = readAndCloseBody(resp.Body) - if err != nil { - return nil, err - } - - requestID := resp.Header.Get("x-ms-request-id") - if len(respBody) == 0 { - // no error in response body, might happen in HEAD requests - err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, requestID) - } else { - // response contains storage service error object, unmarshal - storageErr, errIn := serviceErrFromXML(respBody, resp.StatusCode, requestID) - if err != nil { // error unmarshaling the error response - err = errIn - } - err = storageErr - } - return &storageResponse{ - statusCode: resp.StatusCode, - headers: resp.Header, - body: ioutil.NopCloser(bytes.NewReader(respBody)), /* restore the body */ - }, err + if resp.StatusCode >= 400 && resp.StatusCode <= 505 { + return resp, getErrorFromResponse(resp) } - return &storageResponse{ - statusCode: resp.StatusCode, - headers: resp.Header, - body: resp.Body}, nil + return resp, nil } -func (c Client) execInternalJSON(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, error) { +func (c Client) execInternalJSONCommon(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, *http.Request, *http.Response, error) { headers, err := c.addAuthorizationHeader(verb, url, headers, auth) if err != nil { - return nil, err + return nil, nil, nil, err } req, err := http.NewRequest(verb, url, body) @@ -398,42 +756,122 @@ func (c Client) execInternalJSON(verb, url string, headers map[string]string, bo req.Header.Add(k, v) } - httpClient := c.HTTPClient - if httpClient == nil { - httpClient = http.DefaultClient - } - - resp, err := httpClient.Do(req) + resp, err := c.Sender.Send(&c, req) if err != nil { - return nil, err + return nil, nil, nil, err } - respToRet := &odataResponse{} - respToRet.body = resp.Body - respToRet.statusCode = resp.StatusCode - respToRet.headers = resp.Header + respToRet := &odataResponse{resp: resp} statusCode := resp.StatusCode if statusCode >= 400 && statusCode <= 505 { var respBody []byte respBody, err = readAndCloseBody(resp.Body) if err != nil { - return nil, err + return nil, nil, nil, err } + requestID, date, version := getDebugHeaders(resp.Header) 
if len(respBody) == 0 { // no error in response body, might happen in HEAD requests - err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, resp.Header.Get("x-ms-request-id")) - return respToRet, err + err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, requestID, date, version) + return respToRet, req, resp, err } // try unmarshal as odata.error json err = json.Unmarshal(respBody, &respToRet.odata) - return respToRet, err + } + + return respToRet, req, resp, err +} + +func (c Client) execInternalJSON(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, error) { + respToRet, _, _, err := c.execInternalJSONCommon(verb, url, headers, body, auth) + return respToRet, err +} + +func (c Client) execBatchOperationJSON(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, error) { + // execute common query, get back generated request, response etc... for more processing. + respToRet, req, resp, err := c.execInternalJSONCommon(verb, url, headers, body, auth) + if err != nil { + return nil, err + } + + // return the OData in the case of executing batch commands. + // In this case we need to read the outer batch boundary and contents. + // Then we read the changeset information within the batch + var respBody []byte + respBody, err = readAndCloseBody(resp.Body) + if err != nil { + return nil, err + } + + // outer multipart body + _, batchHeader, err := mime.ParseMediaType(resp.Header["Content-Type"][0]) + if err != nil { + return nil, err + } + + // batch details. + batchBoundary := batchHeader["boundary"] + batchPartBuf, changesetBoundary, err := genBatchReader(batchBoundary, respBody) + if err != nil { + return nil, err + } + + // changeset details. + err = genChangesetReader(req, respToRet, batchPartBuf, changesetBoundary) + if err != nil { + return nil, err } return respToRet, nil } +func genChangesetReader(req *http.Request, respToRet *odataResponse, batchPartBuf io.Reader, changesetBoundary string) error { + changesetMultiReader := multipart.NewReader(batchPartBuf, changesetBoundary) + changesetPart, err := changesetMultiReader.NextPart() + if err != nil { + return err + } + + changesetPartBufioReader := bufio.NewReader(changesetPart) + changesetResp, err := http.ReadResponse(changesetPartBufioReader, req) + if err != nil { + return err + } + + if changesetResp.StatusCode != http.StatusNoContent { + changesetBody, err := readAndCloseBody(changesetResp.Body) + err = json.Unmarshal(changesetBody, &respToRet.odata) + if err != nil { + return err + } + respToRet.resp = changesetResp + } + + return nil +} + +func genBatchReader(batchBoundary string, respBody []byte) (io.Reader, string, error) { + respBodyString := string(respBody) + respBodyReader := strings.NewReader(respBodyString) + + // reading batchresponse + batchMultiReader := multipart.NewReader(respBodyReader, batchBoundary) + batchPart, err := batchMultiReader.NextPart() + if err != nil { + return nil, "", err + } + batchPartBufioReader := bufio.NewReader(batchPart) + + _, changesetHeader, err := mime.ParseMediaType(batchPart.Header.Get("Content-Type")) + if err != nil { + return nil, "", err + } + changesetBoundary := changesetHeader["boundary"] + return batchPartBufioReader, changesetBoundary, nil +} + func readAndCloseBody(body io.ReadCloser) ([]byte, error) { defer body.Close() out, err := ioutil.ReadAll(body) @@ -443,37 +881,109 @@ func readAndCloseBody(body io.ReadCloser) ([]byte, error) { return out, err } -func 
serviceErrFromXML(body []byte, statusCode int, requestID string) (AzureStorageServiceError, error) {
-	var storageErr AzureStorageServiceError
-	if err := xml.Unmarshal(body, &storageErr); err != nil {
-		return storageErr, err
-	}
-	storageErr.StatusCode = statusCode
-	storageErr.RequestID = requestID
-	return storageErr, nil
+// drainRespBody reads the response body to completion, then closes it
+func drainRespBody(resp *http.Response) {
+	io.Copy(ioutil.Discard, resp.Body)
+	resp.Body.Close()
}

-func serviceErrFromStatusCode(code int, status string, requestID string) AzureStorageServiceError {
+func serviceErrFromXML(body []byte, storageErr *AzureStorageServiceError) error {
+	if err := xml.Unmarshal(body, storageErr); err != nil {
+		storageErr.Message = fmt.Sprintf("Response body could not be unmarshaled: %v. Body: %v.", err, string(body))
+		return err
+	}
+	return nil
+}
+
+func serviceErrFromJSON(body []byte, storageErr *AzureStorageServiceError) error {
+	odataError := odataErrorWrapper{}
+	if err := json.Unmarshal(body, &odataError); err != nil {
+		storageErr.Message = fmt.Sprintf("Response body could not be unmarshaled: %v. Body: %v.", err, string(body))
+		return err
+	}
+	storageErr.Code = odataError.Err.Code
+	storageErr.Message = odataError.Err.Message.Value
+	storageErr.Lang = odataError.Err.Message.Lang
+	return nil
+}
+
+func serviceErrFromStatusCode(code int, status string, requestID, date, version string) AzureStorageServiceError {
 	return AzureStorageServiceError{
 		StatusCode: code,
 		Code:       status,
 		RequestID:  requestID,
+		Date:       date,
+		APIVersion: version,
 		Message:    "no response body was available for error status code",
 	}
 }

 func (e AzureStorageServiceError) Error() string {
-	return fmt.Sprintf("storage: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestId=%s, QueryParameterName=%s, QueryParameterValue=%s",
-		e.StatusCode, e.Code, e.Message, e.RequestID, e.QueryParameterName, e.QueryParameterValue)
+	return fmt.Sprintf("storage: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestInitiated=%s, RequestId=%s, API Version=%s, QueryParameterName=%s, QueryParameterValue=%s",
+		e.StatusCode, e.Code, e.Message, e.Date, e.RequestID, e.APIVersion, e.QueryParameterName, e.QueryParameterValue)
 }

 // checkRespCode returns UnexpectedStatusCodeError if the given response code is not
 // one of the allowed status codes; otherwise nil.
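
// A rough illustration of how a caller can inspect the typed error the
// helpers above build; the "ContainerNotFound" code and the error's
// origin are assumptions for the sketch, not part of this patch.
func isContainerMissing(err error) bool {
	storageErr, ok := err.(AzureStorageServiceError)
	if !ok {
		return false
	}
	return storageErr.StatusCode == http.StatusNotFound &&
		storageErr.Code == "ContainerNotFound"
}
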
-func checkRespCode(respCode int, allowed []int) error { +func checkRespCode(resp *http.Response, allowed []int) error { for _, v := range allowed { - if respCode == v { + if resp.StatusCode == v { return nil } } - return UnexpectedStatusCodeError{allowed, respCode} + err := getErrorFromResponse(resp) + return UnexpectedStatusCodeError{ + allowed: allowed, + got: resp.StatusCode, + inner: err, + } +} + +func (c Client) addMetadataToHeaders(h map[string]string, metadata map[string]string) map[string]string { + metadata = c.protectUserAgent(metadata) + for k, v := range metadata { + h[userDefinedMetadataHeaderPrefix+k] = v + } + return h +} + +func getDebugHeaders(h http.Header) (requestID, date, version string) { + requestID = h.Get("x-ms-request-id") + version = h.Get("x-ms-version") + date = h.Get("Date") + return +} + +func getErrorFromResponse(resp *http.Response) error { + respBody, err := readAndCloseBody(resp.Body) + if err != nil { + return err + } + + requestID, date, version := getDebugHeaders(resp.Header) + if len(respBody) == 0 { + // no error in response body, might happen in HEAD requests + err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, requestID, date, version) + } else { + storageErr := AzureStorageServiceError{ + StatusCode: resp.StatusCode, + RequestID: requestID, + Date: date, + APIVersion: version, + } + // response contains storage service error object, unmarshal + if resp.Header.Get("Content-Type") == "application/xml" { + errIn := serviceErrFromXML(respBody, &storageErr) + if err != nil { // error unmarshaling the error response + err = errIn + } + } else { + errIn := serviceErrFromJSON(respBody, &storageErr) + if err != nil { // error unmarshaling the error response + err = errIn + } + } + err = storageErr + } + return err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/commonsasuri.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/commonsasuri.go new file mode 100644 index 00000000..e898e9bf --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/commonsasuri.go @@ -0,0 +1,38 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "net/url" + "time" +) + +// SASOptions includes options used by SAS URIs for different +// services and resources. 
+type SASOptions struct { + APIVersion string + Start time.Time + Expiry time.Time + IP string + UseHTTPS bool + Identifier string +} + +func addQueryParameter(query url.Values, key, value string) url.Values { + if value != "" { + query.Add(key, value) + } + return query +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go index f0642396..056473d4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go @@ -1,13 +1,27 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "encoding/xml" - "errors" "fmt" "io" "net/http" "net/url" "strconv" + "strings" "time" ) @@ -16,20 +30,76 @@ type Container struct { bsc *BlobStorageClient Name string `xml:"Name"` Properties ContainerProperties `xml:"Properties"` + Metadata map[string]string + sasuri url.URL +} + +// Client returns the HTTP client used by the Container reference. +func (c *Container) Client() *Client { + return &c.bsc.client } func (c *Container) buildPath() string { return fmt.Sprintf("/%s", c.Name) } +// GetURL gets the canonical URL to the container. +// This method does not create a publicly accessible URL if the container +// is private and this method does not check if the blob exists. +func (c *Container) GetURL() string { + container := c.Name + if container == "" { + container = "$root" + } + return c.bsc.client.getEndpoint(blobServiceName, pathForResource(container, ""), nil) +} + +// ContainerSASOptions are options to construct a container SAS +// URI. +// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas +type ContainerSASOptions struct { + ContainerSASPermissions + OverrideHeaders + SASOptions +} + +// ContainerSASPermissions includes the available permissions for +// a container SAS URI. +type ContainerSASPermissions struct { + BlobServiceSASPermissions + List bool +} + +// GetSASURI creates an URL to the container which contains the Shared +// Access Signature with the specified options. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas +func (c *Container) GetSASURI(options ContainerSASOptions) (string, error) { + uri := c.GetURL() + signedResource := "c" + canonicalizedResource, err := c.bsc.client.buildCanonicalizedResource(uri, c.bsc.auth, true) + if err != nil { + return "", err + } + + // build permissions string + permissions := options.BlobServiceSASPermissions.buildString() + if options.List { + permissions += "l" + } + + return c.bsc.client.blobAndFileSASURI(options.SASOptions, uri, permissions, canonicalizedResource, signedResource, options.OverrideHeaders) +} + // ContainerProperties contains various properties of a container returned from // various endpoints like ListContainers. 
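
// Sketch of minting a container SAS with the new option types defined
// below. The Read flag is assumed to come from the embedded
// BlobServiceSASPermissions (not shown in this hunk); values are
// illustrative only.
func exampleContainerSAS(cnt *Container) (string, error) {
	opts := ContainerSASOptions{}
	opts.Start = time.Now()
	opts.Expiry = time.Now().Add(time.Hour)
	opts.Read = true // assumed field on BlobServiceSASPermissions
	opts.List = true
	return cnt.GetSASURI(opts)
}
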
type ContainerProperties struct { - LastModified string `xml:"Last-Modified"` - Etag string `xml:"Etag"` - LeaseStatus string `xml:"LeaseStatus"` - LeaseState string `xml:"LeaseState"` - LeaseDuration string `xml:"LeaseDuration"` + LastModified string `xml:"Last-Modified"` + Etag string `xml:"Etag"` + LeaseStatus string `xml:"LeaseStatus"` + LeaseState string `xml:"LeaseState"` + LeaseDuration string `xml:"LeaseDuration"` + PublicAccess ContainerAccessType `xml:"PublicAccess"` } // ContainerListResponse contains the response fields from @@ -69,6 +139,14 @@ type BlobListResponse struct { Delimiter string `xml:"Delimiter"` } +// IncludeBlobDataset has options to include in a list blobs operation +type IncludeBlobDataset struct { + Snapshots bool + Metadata bool + UncommittedBlobs bool + Copy bool +} + // ListBlobsParameters defines the set of customizable // parameters to make a List Blobs call. // @@ -77,9 +155,10 @@ type ListBlobsParameters struct { Prefix string Delimiter string Marker string - Include string + Include *IncludeBlobDataset MaxResults uint Timeout uint + RequestID string } func (p ListBlobsParameters) getParameters() url.Values { @@ -94,19 +173,32 @@ func (p ListBlobsParameters) getParameters() url.Values { if p.Marker != "" { out.Set("marker", p.Marker) } - if p.Include != "" { - out.Set("include", p.Include) + if p.Include != nil { + include := []string{} + include = addString(include, p.Include.Snapshots, "snapshots") + include = addString(include, p.Include.Metadata, "metadata") + include = addString(include, p.Include.UncommittedBlobs, "uncommittedblobs") + include = addString(include, p.Include.Copy, "copy") + fullInclude := strings.Join(include, ",") + out.Set("include", fullInclude) } if p.MaxResults != 0 { - out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults)) + out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10)) } if p.Timeout != 0 { - out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) + out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10)) } return out } +func addString(datasets []string, include bool, text string) []string { + if include { + datasets = append(datasets, text) + } + return datasets +} + // ContainerAccessType defines the access level to the container from a public // request. // @@ -142,123 +234,160 @@ const ( ContainerAccessHeader string = "x-ms-blob-public-access" ) +// GetBlobReference returns a Blob object for the specified blob name. +func (c *Container) GetBlobReference(name string) *Blob { + return &Blob{ + Container: c, + Name: name, + } +} + +// CreateContainerOptions includes the options for a create container operation +type CreateContainerOptions struct { + Timeout uint + Access ContainerAccessType `header:"x-ms-blob-public-access"` + RequestID string `header:"x-ms-client-request-id"` +} + // Create creates a blob container within the storage account // with given name and access level. Returns error if container already exists. 
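
// The Include parameter changes from a comma-separated string to a
// typed struct; a listing that previously passed "snapshots,metadata"
// now looks roughly like this (container value assumed):
func exampleListWithIncludes(cnt *Container) (BlobListResponse, error) {
	return cnt.ListBlobs(ListBlobsParameters{
		MaxResults: 100,
		Include:    &IncludeBlobDataset{Snapshots: true, Metadata: true},
	})
}
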
// -// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx -func (c *Container) Create() error { - resp, err := c.create() +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Container +func (c *Container) Create(options *CreateContainerOptions) error { + resp, err := c.create(options) if err != nil { return err } - defer readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) + defer drainRespBody(resp) + return checkRespCode(resp, []int{http.StatusCreated}) } // CreateIfNotExists creates a blob container if it does not exist. Returns // true if container is newly created or false if container already exists. -func (c *Container) CreateIfNotExists() (bool, error) { - resp, err := c.create() +func (c *Container) CreateIfNotExists(options *CreateContainerOptions) (bool, error) { + resp, err := c.create(options) if resp != nil { - defer readAndCloseBody(resp.body) - if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict { - return resp.statusCode == http.StatusCreated, nil + defer drainRespBody(resp) + if resp.StatusCode == http.StatusCreated || resp.StatusCode == http.StatusConflict { + return resp.StatusCode == http.StatusCreated, nil } } return false, err } -func (c *Container) create() (*storageResponse, error) { - uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), url.Values{"restype": {"container"}}) +func (c *Container) create(options *CreateContainerOptions) (*http.Response, error) { + query := url.Values{"restype": {"container"}} headers := c.bsc.client.getStandardHeaders() + headers = c.bsc.client.addMetadataToHeaders(headers, c.Metadata) + + if options != nil { + query = addTimeout(query, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), query) + return c.bsc.client.exec(http.MethodPut, uri, headers, nil, c.bsc.auth) } // Exists returns true if a container with given name exists // on the storage account, otherwise returns false. 
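
// Container creation now takes an options struct; a hedged example,
// assuming the client exposes a GetContainerReference helper and a
// blob-level public-access constant:
func ensureContainer(blobCli BlobStorageClient, name string) (bool, error) {
	cnt := blobCli.GetContainerReference(name)                      // assumed helper
	opts := CreateContainerOptions{Access: ContainerAccessTypeBlob} // assumed constant
	return cnt.CreateIfNotExists(&opts)
}
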
func (c *Container) Exists() (bool, error) { - uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), url.Values{"restype": {"container"}}) + q := url.Values{"restype": {"container"}} + var uri string + if c.bsc.client.isServiceSASClient() { + q = mergeParams(q, c.sasuri.Query()) + newURI := c.sasuri + newURI.RawQuery = q.Encode() + uri = newURI.String() + + } else { + uri = c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q) + } headers := c.bsc.client.getStandardHeaders() resp, err := c.bsc.client.exec(http.MethodHead, uri, headers, nil, c.bsc.auth) if resp != nil { - defer readAndCloseBody(resp.body) - if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound { - return resp.statusCode == http.StatusOK, nil + defer drainRespBody(resp) + if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNotFound { + return resp.StatusCode == http.StatusOK, nil } } return false, err } -// SetPermissions sets up container permissions as per https://msdn.microsoft.com/en-us/library/azure/dd179391.aspx -func (c *Container) SetPermissions(permissions ContainerPermissions, timeout int, leaseID string) error { +// SetContainerPermissionOptions includes options for a set container permissions operation +type SetContainerPermissionOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + RequestID string `header:"x-ms-client-request-id"` +} + +// SetPermissions sets up container permissions +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Container-ACL +func (c *Container) SetPermissions(permissions ContainerPermissions, options *SetContainerPermissionOptions) error { + body, length, err := generateContainerACLpayload(permissions.AccessPolicies) + if err != nil { + return err + } params := url.Values{ "restype": {"container"}, "comp": {"acl"}, } - - if timeout > 0 { - params.Add("timeout", strconv.Itoa(timeout)) - } - - uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params) headers := c.bsc.client.getStandardHeaders() - if permissions.AccessType != "" { - headers[ContainerAccessHeader] = string(permissions.AccessType) - } - - if leaseID != "" { - headers[headerLeaseID] = leaseID - } - - body, length, err := generateContainerACLpayload(permissions.AccessPolicies) + headers = addToHeaders(headers, ContainerAccessHeader, string(permissions.AccessType)) headers["Content-Length"] = strconv.Itoa(length) + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params) + resp, err := c.bsc.client.exec(http.MethodPut, uri, headers, body, c.bsc.auth) if err != nil { return err } - defer readAndCloseBody(resp.body) + defer drainRespBody(resp) + return checkRespCode(resp, []int{http.StatusOK}) +} - if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return errors.New("Unable to set permissions") - } - - return nil +// GetContainerPermissionOptions includes options for a get container permissions operation +type GetContainerPermissionOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + RequestID string `header:"x-ms-client-request-id"` } // GetPermissions gets the container permissions as per https://msdn.microsoft.com/en-us/library/azure/dd179469.aspx // If timeout is 0 then it will not be 
passed to Azure // leaseID will only be passed to Azure if populated -func (c *Container) GetPermissions(timeout int, leaseID string) (*ContainerPermissions, error) { +func (c *Container) GetPermissions(options *GetContainerPermissionOptions) (*ContainerPermissions, error) { params := url.Values{ "restype": {"container"}, "comp": {"acl"}, } - - if timeout > 0 { - params.Add("timeout", strconv.Itoa(timeout)) - } - - uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params) headers := c.bsc.client.getStandardHeaders() - if leaseID != "" { - headers[headerLeaseID] = leaseID + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) } + uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params) resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth) if err != nil { return nil, err } - defer resp.body.Close() + defer resp.Body.Close() var ap AccessPolicy - err = xmlUnmarshal(resp.body, &ap.SignedIdentifiersList) + err = xmlUnmarshal(resp.Body, &ap.SignedIdentifiersList) if err != nil { return nil, err } - return buildAccessPolicy(ap, &resp.headers), nil + return buildAccessPolicy(ap, &resp.Header), nil } func buildAccessPolicy(ap AccessPolicy, headers *http.Header) *ContainerPermissions { @@ -284,17 +413,26 @@ func buildAccessPolicy(ap AccessPolicy, headers *http.Header) *ContainerPermissi return &permissions } +// DeleteContainerOptions includes options for a delete container operation +type DeleteContainerOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + RequestID string `header:"x-ms-client-request-id"` +} + // Delete deletes the container with given name on the storage // account. If the container does not exist returns error. // -// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx -func (c *Container) Delete() error { - resp, err := c.delete() +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-container +func (c *Container) Delete(options *DeleteContainerOptions) error { + resp, err := c.delete(options) if err != nil { return err } - defer readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) + defer drainRespBody(resp) + return checkRespCode(resp, []int{http.StatusAccepted}) } // DeleteIfExists deletes the container with given name on the storage @@ -302,47 +440,142 @@ func (c *Container) Delete() error { // false if the container did not exist at the time of the Delete Container // operation. 
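
// SetPermissions and GetPermissions drop their positional timeout and
// leaseID arguments in favour of option structs; a sketch with
// illustrative values (the container access constant is assumed):
func examplePermissions(cnt *Container) error {
	perms := ContainerPermissions{AccessType: ContainerAccessTypeContainer}
	if err := cnt.SetPermissions(perms, &SetContainerPermissionOptions{Timeout: 30}); err != nil {
		return err
	}
	_, err := cnt.GetPermissions(&GetContainerPermissionOptions{Timeout: 30})
	return err
}
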
// -// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx -func (c *Container) DeleteIfExists() (bool, error) { - resp, err := c.delete() +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-container +func (c *Container) DeleteIfExists(options *DeleteContainerOptions) (bool, error) { + resp, err := c.delete(options) if resp != nil { - defer readAndCloseBody(resp.body) - if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { - return resp.statusCode == http.StatusAccepted, nil + defer drainRespBody(resp) + if resp.StatusCode == http.StatusAccepted || resp.StatusCode == http.StatusNotFound { + return resp.StatusCode == http.StatusAccepted, nil } } return false, err } -func (c *Container) delete() (*storageResponse, error) { - uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), url.Values{"restype": {"container"}}) +func (c *Container) delete(options *DeleteContainerOptions) (*http.Response, error) { + query := url.Values{"restype": {"container"}} headers := c.bsc.client.getStandardHeaders() + + if options != nil { + query = addTimeout(query, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), query) + return c.bsc.client.exec(http.MethodDelete, uri, headers, nil, c.bsc.auth) } // ListBlobs returns an object that contains list of blobs in the container, // pagination token and other information in the response of List Blobs call. // -// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Blobs func (c *Container) ListBlobs(params ListBlobsParameters) (BlobListResponse, error) { q := mergeParams(params.getParameters(), url.Values{ "restype": {"container"}, - "comp": {"list"}}, - ) - uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q) + "comp": {"list"}, + }) + var uri string + if c.bsc.client.isServiceSASClient() { + q = mergeParams(q, c.sasuri.Query()) + newURI := c.sasuri + newURI.RawQuery = q.Encode() + uri = newURI.String() + } else { + uri = c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q) + } + headers := c.bsc.client.getStandardHeaders() + headers = addToHeaders(headers, "x-ms-client-request-id", params.RequestID) var out BlobListResponse resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth) if err != nil { return out, err } - defer resp.body.Close() + defer resp.Body.Close() - err = xmlUnmarshal(resp.body, &out) + err = xmlUnmarshal(resp.Body, &out) + for i := range out.Blobs { + out.Blobs[i].Container = c + } return out, err } +// ContainerMetadataOptions includes options for container metadata operations +type ContainerMetadataOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + RequestID string `header:"x-ms-client-request-id"` +} + +// SetMetadata replaces the metadata for the specified container. +// +// Some keys may be converted to Camel-Case before sending. All keys +// are returned in lower case by GetBlobMetadata. HTTP header names +// are case-insensitive so case munging should not matter to other +// applications either. 
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/set-container-metadata +func (c *Container) SetMetadata(options *ContainerMetadataOptions) error { + params := url.Values{ + "comp": {"metadata"}, + "restype": {"container"}, + } + headers := c.bsc.client.getStandardHeaders() + headers = c.bsc.client.addMetadataToHeaders(headers, c.Metadata) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + + uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params) + + resp, err := c.bsc.client.exec(http.MethodPut, uri, headers, nil, c.bsc.auth) + if err != nil { + return err + } + defer drainRespBody(resp) + return checkRespCode(resp, []int{http.StatusOK}) +} + +// GetMetadata returns all user-defined metadata for the specified container. +// +// All metadata keys will be returned in lower case. (HTTP header +// names are case-insensitive.) +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/get-container-metadata +func (c *Container) GetMetadata(options *ContainerMetadataOptions) error { + params := url.Values{ + "comp": {"metadata"}, + "restype": {"container"}, + } + headers := c.bsc.client.getStandardHeaders() + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + + uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params) + + resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth) + if err != nil { + return err + } + defer drainRespBody(resp) + if err := checkRespCode(resp, []int{http.StatusOK}); err != nil { + return err + } + + c.writeMetadata(resp.Header) + return nil +} + +func (c *Container) writeMetadata(h http.Header) { + c.Metadata = writeMetadata(h) +} + func generateContainerACLpayload(policies []ContainerAccessPolicy) (io.Reader, int, error) { sil := SignedIdentifiers{ SignedIdentifiers: []SignedIdentifier{}, @@ -374,3 +607,34 @@ func (capd *ContainerAccessPolicy) generateContainerPermissions() (permissions s return permissions } + +// GetProperties updated the properties of the container. 
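
// Metadata now lives on the Container struct and round-trips through
// SetMetadata/GetMetadata: keys go out under the user-defined metadata
// header prefix and come back lower-cased. A minimal sketch:
func exampleMetadata(cnt *Container) (string, error) {
	cnt.Metadata = map[string]string{"team": "storage"}
	if err := cnt.SetMetadata(nil); err != nil {
		return "", err
	}
	if err := cnt.GetMetadata(nil); err != nil {
		return "", err
	}
	return cnt.Metadata["team"], nil
}
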
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/get-container-properties +func (c *Container) GetProperties() error { + params := url.Values{ + "restype": {"container"}, + } + headers := c.bsc.client.getStandardHeaders() + + uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params) + + resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth) + if err != nil { + return err + } + defer resp.Body.Close() + if err := checkRespCode(resp, []int{http.StatusOK}); err != nil { + return err + } + + // update properties + c.Properties.Etag = resp.Header.Get(headerEtag) + c.Properties.LeaseStatus = resp.Header.Get("x-ms-lease-status") + c.Properties.LeaseState = resp.Header.Get("x-ms-lease-state") + c.Properties.LeaseDuration = resp.Header.Get("x-ms-lease-duration") + c.Properties.LastModified = resp.Header.Get("Last-Modified") + c.Properties.PublicAccess = ContainerAccessType(resp.Header.Get(ContainerAccessHeader)) + + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go new file mode 100644 index 00000000..151e9a51 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go @@ -0,0 +1,237 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "time" +) + +const ( + blobCopyStatusPending = "pending" + blobCopyStatusSuccess = "success" + blobCopyStatusAborted = "aborted" + blobCopyStatusFailed = "failed" +) + +// CopyOptions includes the options for a copy blob operation +type CopyOptions struct { + Timeout uint + Source CopyOptionsConditions + Destiny CopyOptionsConditions + RequestID string +} + +// IncrementalCopyOptions includes the options for an incremental copy blob operation +type IncrementalCopyOptions struct { + Timeout uint + Destination IncrementalCopyOptionsConditions + RequestID string +} + +// CopyOptionsConditions includes some conditional options in a copy blob operation +type CopyOptionsConditions struct { + LeaseID string + IfModifiedSince *time.Time + IfUnmodifiedSince *time.Time + IfMatch string + IfNoneMatch string +} + +// IncrementalCopyOptionsConditions includes some conditional options in a copy blob operation +type IncrementalCopyOptionsConditions struct { + IfModifiedSince *time.Time + IfUnmodifiedSince *time.Time + IfMatch string + IfNoneMatch string +} + +// Copy starts a blob copy operation and waits for the operation to +// complete. sourceBlob parameter must be a canonical URL to the blob (can be +// obtained using the GetURL method.) There is no SLA on blob copy and therefore +// this helper method works faster on smaller files. 
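
// Blob copy with the new options type; Copy waits for completion. The
// blob names are illustrative, and GetURL is the blob-level helper the
// doc comment above refers to:
func exampleCopy(cnt *Container) error {
	src := cnt.GetBlobReference("src.txt")
	dst := cnt.GetBlobReference("dst.txt")
	return dst.Copy(src.GetURL(), &CopyOptions{Timeout: 60})
}
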
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Copy-Blob +func (b *Blob) Copy(sourceBlob string, options *CopyOptions) error { + copyID, err := b.StartCopy(sourceBlob, options) + if err != nil { + return err + } + + return b.WaitForCopy(copyID) +} + +// StartCopy starts a blob copy operation. +// sourceBlob parameter must be a canonical URL to the blob (can be +// obtained using the GetURL method.) +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Copy-Blob +func (b *Blob) StartCopy(sourceBlob string, options *CopyOptions) (string, error) { + params := url.Values{} + headers := b.Container.bsc.client.getStandardHeaders() + headers["x-ms-copy-source"] = sourceBlob + headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID) + // source + headers = addToHeaders(headers, "x-ms-source-lease-id", options.Source.LeaseID) + headers = addTimeToHeaders(headers, "x-ms-source-if-modified-since", options.Source.IfModifiedSince) + headers = addTimeToHeaders(headers, "x-ms-source-if-unmodified-since", options.Source.IfUnmodifiedSince) + headers = addToHeaders(headers, "x-ms-source-if-match", options.Source.IfMatch) + headers = addToHeaders(headers, "x-ms-source-if-none-match", options.Source.IfNoneMatch) + //destiny + headers = addToHeaders(headers, "x-ms-lease-id", options.Destiny.LeaseID) + headers = addTimeToHeaders(headers, "x-ms-if-modified-since", options.Destiny.IfModifiedSince) + headers = addTimeToHeaders(headers, "x-ms-if-unmodified-since", options.Destiny.IfUnmodifiedSince) + headers = addToHeaders(headers, "x-ms-if-match", options.Destiny.IfMatch) + headers = addToHeaders(headers, "x-ms-if-none-match", options.Destiny.IfNoneMatch) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) + if err != nil { + return "", err + } + defer drainRespBody(resp) + + if err := checkRespCode(resp, []int{http.StatusAccepted, http.StatusCreated}); err != nil { + return "", err + } + + copyID := resp.Header.Get("x-ms-copy-id") + if copyID == "" { + return "", errors.New("Got empty copy id header") + } + return copyID, nil +} + +// AbortCopyOptions includes the options for an abort blob operation +type AbortCopyOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + RequestID string `header:"x-ms-client-request-id"` +} + +// AbortCopy aborts a BlobCopy which has already been triggered by the StartBlobCopy function. +// copyID is generated from StartBlobCopy function. +// currentLeaseID is required IF the destination blob has an active lease on it. 
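
// For long-running copies the split StartCopy/WaitForCopy pair lets a
// caller do other work between the two steps (sketch, names assumed):
func exampleStartThenWait(dst *Blob, sourceURL string) error {
	copyID, err := dst.StartCopy(sourceURL, nil)
	if err != nil {
		return err
	}
	// ... other work ...
	return dst.WaitForCopy(copyID)
}
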
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Abort-Copy-Blob
+func (b *Blob) AbortCopy(copyID string, options *AbortCopyOptions) error {
+	params := url.Values{
+		"comp":   {"copy"},
+		"copyid": {copyID},
+	}
+	headers := b.Container.bsc.client.getStandardHeaders()
+	headers["x-ms-copy-action"] = "abort"
+
+	if options != nil {
+		params = addTimeout(params, options.Timeout)
+		headers = mergeHeaders(headers, headersFromStruct(*options))
+	}
+	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
+
+	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
+	if err != nil {
+		return err
+	}
+	defer drainRespBody(resp)
+	return checkRespCode(resp, []int{http.StatusNoContent})
+}
+
+// WaitForCopy loops until a BlobCopy operation is completed (or fails with an error)
+func (b *Blob) WaitForCopy(copyID string) error {
+	for {
+		err := b.GetProperties(nil)
+		if err != nil {
+			return err
+		}
+
+		if b.Properties.CopyID != copyID {
+			return errBlobCopyIDMismatch
+		}
+
+		switch b.Properties.CopyStatus {
+		case blobCopyStatusSuccess:
+			return nil
+		case blobCopyStatusPending:
+			continue
+		case blobCopyStatusAborted:
+			return errBlobCopyAborted
+		case blobCopyStatusFailed:
+			return fmt.Errorf("storage: blob copy failed. Id=%s Description=%s", b.Properties.CopyID, b.Properties.CopyStatusDescription)
+		default:
+			return fmt.Errorf("storage: unhandled blob copy status: '%s'", b.Properties.CopyStatus)
+		}
+	}
+}
+
+// IncrementalCopyBlob copies a snapshot of a source blob to this (referring) blob.
+// The sourceBlob parameter must be a valid snapshot URL of the original blob.
+// The original blob must be public, or use a Shared Access Signature.
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/incremental-copy-blob .
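
// Note that WaitForCopy re-polls immediately while the status is still
// pending; a caller-side loop with a sleep is gentler on the service.
// A sketch, with a 2-second interval as an assumed choice:
func waitForCopyWithBackoff(b *Blob) error {
	for {
		if err := b.GetProperties(nil); err != nil {
			return err
		}
		if b.Properties.CopyStatus != blobCopyStatusPending {
			return nil
		}
		time.Sleep(2 * time.Second)
	}
}
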
+func (b *Blob) IncrementalCopyBlob(sourceBlobURL string, snapshotTime time.Time, options *IncrementalCopyOptions) (string, error) { + params := url.Values{"comp": {"incrementalcopy"}} + + // need formatting to 7 decimal places so it's friendly to Windows and *nix + snapshotTimeFormatted := snapshotTime.Format("2006-01-02T15:04:05.0000000Z") + u, err := url.Parse(sourceBlobURL) + if err != nil { + return "", err + } + query := u.Query() + query.Add("snapshot", snapshotTimeFormatted) + encodedQuery := query.Encode() + encodedQuery = strings.Replace(encodedQuery, "%3A", ":", -1) + u.RawQuery = encodedQuery + snapshotURL := u.String() + + headers := b.Container.bsc.client.getStandardHeaders() + headers["x-ms-copy-source"] = snapshotURL + + if options != nil { + addTimeout(params, options.Timeout) + headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID) + headers = addTimeToHeaders(headers, "x-ms-if-modified-since", options.Destination.IfModifiedSince) + headers = addTimeToHeaders(headers, "x-ms-if-unmodified-since", options.Destination.IfUnmodifiedSince) + headers = addToHeaders(headers, "x-ms-if-match", options.Destination.IfMatch) + headers = addToHeaders(headers, "x-ms-if-none-match", options.Destination.IfNoneMatch) + } + + // get URI of destination blob + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) + if err != nil { + return "", err + } + defer drainRespBody(resp) + + if err := checkRespCode(resp, []int{http.StatusAccepted}); err != nil { + return "", err + } + + copyID := resp.Header.Get("x-ms-copy-id") + if copyID == "" { + return "", errors.New("Got empty copy id header") + } + return copyID, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go index d27e6207..2e805e7d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go @@ -1,9 +1,24 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "encoding/xml" "net/http" "net/url" + "sync" ) // Directory represents a directory on a share. @@ -25,8 +40,9 @@ type DirectoryProperties struct { // ListDirsAndFilesParameters defines the set of customizable parameters to // make a List Files and Directories call. // -// See https://msdn.microsoft.com/en-us/library/azure/dn166980.aspx +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files type ListDirsAndFilesParameters struct { + Prefix string Marker string MaxResults uint Timeout uint @@ -35,7 +51,7 @@ type ListDirsAndFilesParameters struct { // DirsAndFilesListResponse contains the response fields from // a List Files and Directories call. 
// -// See https://msdn.microsoft.com/en-us/library/azure/dn166980.aspx +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files type DirsAndFilesListResponse struct { XMLName xml.Name `xml:"EnumerationResults"` Xmlns string `xml:"xmlns,attr"` @@ -60,14 +76,15 @@ func (d *Directory) buildPath() string { // Create this directory in the associated share. // If a directory with the same name already exists, the operation fails. // -// See https://msdn.microsoft.com/en-us/library/azure/dn166993.aspx -func (d *Directory) Create() error { +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Directory +func (d *Directory) Create(options *FileRequestOptions) error { // if this is the root directory exit early if d.parent == nil { return nil } - headers, err := d.fsc.createResource(d.buildPath(), resourceDirectory, nil, mergeMDIntoExtraHeaders(d.Metadata, nil), []int{http.StatusCreated}) + params := prepareOptions(options) + headers, err := d.fsc.createResource(d.buildPath(), resourceDirectory, params, mergeMDIntoExtraHeaders(d.Metadata, nil), []int{http.StatusCreated}) if err != nil { return err } @@ -80,23 +97,24 @@ func (d *Directory) Create() error { // directory does not exists. Returns true if the directory is newly created or // false if the directory already exists. // -// See https://msdn.microsoft.com/en-us/library/azure/dn166993.aspx -func (d *Directory) CreateIfNotExists() (bool, error) { +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Directory +func (d *Directory) CreateIfNotExists(options *FileRequestOptions) (bool, error) { // if this is the root directory exit early if d.parent == nil { return false, nil } - resp, err := d.fsc.createResourceNoClose(d.buildPath(), resourceDirectory, nil, nil) + params := prepareOptions(options) + resp, err := d.fsc.createResourceNoClose(d.buildPath(), resourceDirectory, params, nil) if resp != nil { - defer readAndCloseBody(resp.body) - if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict { - if resp.statusCode == http.StatusCreated { - d.updateEtagAndLastModified(resp.headers) + defer drainRespBody(resp) + if resp.StatusCode == http.StatusCreated || resp.StatusCode == http.StatusConflict { + if resp.StatusCode == http.StatusCreated { + d.updateEtagAndLastModified(resp.Header) return true, nil } - return false, d.FetchAttributes() + return false, d.FetchAttributes(nil) } } @@ -106,20 +124,20 @@ func (d *Directory) CreateIfNotExists() (bool, error) { // Delete removes this directory. It must be empty in order to be deleted. // If the directory does not exist the operation fails. // -// See https://msdn.microsoft.com/en-us/library/azure/dn166969.aspx -func (d *Directory) Delete() error { - return d.fsc.deleteResource(d.buildPath(), resourceDirectory) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Directory +func (d *Directory) Delete(options *FileRequestOptions) error { + return d.fsc.deleteResource(d.buildPath(), resourceDirectory, options) } // DeleteIfExists removes this directory if it exists. 
// -// See https://msdn.microsoft.com/en-us/library/azure/dn166969.aspx -func (d *Directory) DeleteIfExists() (bool, error) { - resp, err := d.fsc.deleteResourceNoClose(d.buildPath(), resourceDirectory) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Directory +func (d *Directory) DeleteIfExists(options *FileRequestOptions) (bool, error) { + resp, err := d.fsc.deleteResourceNoClose(d.buildPath(), resourceDirectory, options) if resp != nil { - defer readAndCloseBody(resp.body) - if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { - return resp.statusCode == http.StatusAccepted, nil + defer drainRespBody(resp) + if resp.StatusCode == http.StatusAccepted || resp.StatusCode == http.StatusNotFound { + return resp.StatusCode == http.StatusAccepted, nil } } return false, err @@ -135,8 +153,10 @@ func (d *Directory) Exists() (bool, error) { } // FetchAttributes retrieves metadata for this directory. -func (d *Directory) FetchAttributes() error { - headers, err := d.fsc.getResourceHeaders(d.buildPath(), compNone, resourceDirectory, http.MethodHead) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-directory-properties +func (d *Directory) FetchAttributes(options *FileRequestOptions) error { + params := prepareOptions(options) + headers, err := d.fsc.getResourceHeaders(d.buildPath(), compNone, resourceDirectory, params, http.MethodHead) if err != nil { return err } @@ -164,13 +184,14 @@ func (d *Directory) GetFileReference(name string) *File { Name: name, parent: d, share: d.share, + mutex: &sync.Mutex{}, } } // ListDirsAndFiles returns a list of files and directories under this directory. // It also contains a pagination token and other response details. // -// See https://msdn.microsoft.com/en-us/library/azure/dn166980.aspx +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files func (d *Directory) ListDirsAndFiles(params ListDirsAndFilesParameters) (*DirsAndFilesListResponse, error) { q := mergeParams(params.getParameters(), getURLInitValues(compList, resourceDirectory)) @@ -179,9 +200,9 @@ func (d *Directory) ListDirsAndFiles(params ListDirsAndFilesParameters) (*DirsAn return nil, err } - defer resp.body.Close() + defer resp.Body.Close() var out DirsAndFilesListResponse - err = xmlUnmarshal(resp.body, &out) + err = xmlUnmarshal(resp.Body, &out) return &out, err } @@ -192,9 +213,9 @@ func (d *Directory) ListDirsAndFiles(params ListDirsAndFilesParameters) (*DirsAn // are case-insensitive so case munging should not matter to other // applications either. 
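
// Directory listing gains a Prefix filter alongside the existing
// paging parameters; a sketch with illustrative values:
func exampleListDir(dir *Directory) (*DirsAndFilesListResponse, error) {
	return dir.ListDirsAndFiles(ListDirsAndFilesParameters{
		Prefix:     "logs-",
		MaxResults: 50,
	})
}
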
// -// See https://msdn.microsoft.com/en-us/library/azure/mt427370.aspx -func (d *Directory) SetMetadata() error { - headers, err := d.fsc.setResourceHeaders(d.buildPath(), compMetadata, resourceDirectory, mergeMDIntoExtraHeaders(d.Metadata, nil)) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Directory-Metadata +func (d *Directory) SetMetadata(options *FileRequestOptions) error { + headers, err := d.fsc.setResourceHeaders(d.buildPath(), compMetadata, resourceDirectory, mergeMDIntoExtraHeaders(d.Metadata, nil), options) if err != nil { return err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go new file mode 100644 index 00000000..fbbcb93b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go @@ -0,0 +1,456 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/satori/go.uuid" +) + +// Annotating as secure for gas scanning +/* #nosec */ +const ( + partitionKeyNode = "PartitionKey" + rowKeyNode = "RowKey" + etagErrorTemplate = "Etag didn't match: %v" +) + +var ( + errEmptyPayload = errors.New("Empty payload is not a valid metadata level for this operation") + errNilPreviousResult = errors.New("The previous results page is nil") + errNilNextLink = errors.New("There are no more pages in this query results") +) + +// Entity represents an entity inside an Azure table. +type Entity struct { + Table *Table + PartitionKey string + RowKey string + TimeStamp time.Time + OdataMetadata string + OdataType string + OdataID string + OdataEtag string + OdataEditLink string + Properties map[string]interface{} +} + +// GetEntityReference returns an Entity object with the specified +// partition key and row key. +func (t *Table) GetEntityReference(partitionKey, rowKey string) *Entity { + return &Entity{ + PartitionKey: partitionKey, + RowKey: rowKey, + Table: t, + } +} + +// EntityOptions includes options for entity operations. +type EntityOptions struct { + Timeout uint + RequestID string `header:"x-ms-client-request-id"` +} + +// GetEntityOptions includes options for a get entity operation +type GetEntityOptions struct { + Select []string + RequestID string `header:"x-ms-client-request-id"` +} + +// Get gets the referenced entity. Which properties to get can be +// specified using the select option. 
+// See: +// https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities +// https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/querying-tables-and-entities +func (e *Entity) Get(timeout uint, ml MetadataLevel, options *GetEntityOptions) error { + if ml == EmptyPayload { + return errEmptyPayload + } + // RowKey and PartitionKey could be lost if not included in the query + // As those are the entity identifiers, it is best if they are not lost + rk := e.RowKey + pk := e.PartitionKey + + query := url.Values{ + "timeout": {strconv.FormatUint(uint64(timeout), 10)}, + } + headers := e.Table.tsc.client.getStandardHeaders() + headers[headerAccept] = string(ml) + + if options != nil { + if len(options.Select) > 0 { + query.Add("$select", strings.Join(options.Select, ",")) + } + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + + uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query) + resp, err := e.Table.tsc.client.exec(http.MethodGet, uri, headers, nil, e.Table.tsc.auth) + if err != nil { + return err + } + defer drainRespBody(resp) + + if err = checkRespCode(resp, []int{http.StatusOK}); err != nil { + return err + } + + respBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + err = json.Unmarshal(respBody, e) + if err != nil { + return err + } + e.PartitionKey = pk + e.RowKey = rk + + return nil +} + +// Insert inserts the referenced entity in its table. +// The function fails if there is an entity with the same +// PartitionKey and RowKey in the table. +// ml determines the level of detail of metadata in the operation response, +// or no data at all. +// See: https://docs.microsoft.com/rest/api/storageservices/fileservices/insert-entity +func (e *Entity) Insert(ml MetadataLevel, options *EntityOptions) error { + query, headers := options.getParameters() + headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders()) + + body, err := json.Marshal(e) + if err != nil { + return err + } + headers = addBodyRelatedHeaders(headers, len(body)) + headers = addReturnContentHeaders(headers, ml) + + uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.Table.buildPath(), query) + resp, err := e.Table.tsc.client.exec(http.MethodPost, uri, headers, bytes.NewReader(body), e.Table.tsc.auth) + if err != nil { + return err + } + defer drainRespBody(resp) + + if ml != EmptyPayload { + if err = checkRespCode(resp, []int{http.StatusCreated}); err != nil { + return err + } + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + if err = e.UnmarshalJSON(data); err != nil { + return err + } + } else { + if err = checkRespCode(resp, []int{http.StatusNoContent}); err != nil { + return err + } + } + + return nil +} + +// Update updates the contents of an entity. The function fails if there is no entity +// with the same PartitionKey and RowKey in the table or if the ETag is different +// than the one in Azure. +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/update-entity2 +func (e *Entity) Update(force bool, options *EntityOptions) error { + return e.updateMerge(force, http.MethodPut, options) +} + +// Merge merges the contents of entity specified with PartitionKey and RowKey +// with the content specified in Properties. +// The function fails if there is no entity with the same PartitionKey and +// RowKey in the table or if the ETag is different than the one in Azure. 
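
// Entity round-trip sketch; the table reference is assumed to exist,
// and FullMetadata is an assumed MetadataLevel value from elsewhere in
// the package (only EmptyPayload appears in this hunk):
func exampleEntity(t *Table) error {
	e := t.GetEntityReference("pk1", "rk1")
	e.Properties = map[string]interface{}{"Count": int64(1)}
	return e.Insert(FullMetadata, nil)
}
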
+// Read more: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/merge-entity +func (e *Entity) Merge(force bool, options *EntityOptions) error { + return e.updateMerge(force, "MERGE", options) +} + +// Delete deletes the entity. +// The function fails if there is no entity with the same PartitionKey and +// RowKey in the table or if the ETag is different than the one in Azure. +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-entity1 +func (e *Entity) Delete(force bool, options *EntityOptions) error { + query, headers := options.getParameters() + headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders()) + + headers = addIfMatchHeader(headers, force, e.OdataEtag) + headers = addReturnContentHeaders(headers, EmptyPayload) + + uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query) + resp, err := e.Table.tsc.client.exec(http.MethodDelete, uri, headers, nil, e.Table.tsc.auth) + if err != nil { + if resp.StatusCode == http.StatusPreconditionFailed { + return fmt.Errorf(etagErrorTemplate, err) + } + return err + } + defer drainRespBody(resp) + + if err = checkRespCode(resp, []int{http.StatusNoContent}); err != nil { + return err + } + + return e.updateTimestamp(resp.Header) +} + +// InsertOrReplace inserts an entity or replaces the existing one. +// Read more: https://docs.microsoft.com/rest/api/storageservices/fileservices/insert-or-replace-entity +func (e *Entity) InsertOrReplace(options *EntityOptions) error { + return e.insertOr(http.MethodPut, options) +} + +// InsertOrMerge inserts an entity or merges the existing one. +// Read more: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/insert-or-merge-entity +func (e *Entity) InsertOrMerge(options *EntityOptions) error { + return e.insertOr("MERGE", options) +} + +func (e *Entity) buildPath() string { + return fmt.Sprintf("%s(PartitionKey='%s', RowKey='%s')", e.Table.buildPath(), e.PartitionKey, e.RowKey) +} + +// MarshalJSON is a custom marshaller for entity +func (e *Entity) MarshalJSON() ([]byte, error) { + completeMap := map[string]interface{}{} + completeMap[partitionKeyNode] = e.PartitionKey + completeMap[rowKeyNode] = e.RowKey + for k, v := range e.Properties { + typeKey := strings.Join([]string{k, OdataTypeSuffix}, "") + switch t := v.(type) { + case []byte: + completeMap[typeKey] = OdataBinary + completeMap[k] = t + case time.Time: + completeMap[typeKey] = OdataDateTime + completeMap[k] = t.Format(time.RFC3339Nano) + case uuid.UUID: + completeMap[typeKey] = OdataGUID + completeMap[k] = t.String() + case int64: + completeMap[typeKey] = OdataInt64 + completeMap[k] = fmt.Sprintf("%v", v) + default: + completeMap[k] = v + } + if strings.HasSuffix(k, OdataTypeSuffix) { + if !(completeMap[k] == OdataBinary || + completeMap[k] == OdataDateTime || + completeMap[k] == OdataGUID || + completeMap[k] == OdataInt64) { + return nil, fmt.Errorf("Odata.type annotation %v value is not valid", k) + } + valueKey := strings.TrimSuffix(k, OdataTypeSuffix) + if _, ok := completeMap[valueKey]; !ok { + return nil, fmt.Errorf("Odata.type annotation %v defined without value defined", k) + } + } + } + return json.Marshal(completeMap) +} + +// UnmarshalJSON is a custom unmarshaller for entities +func (e *Entity) UnmarshalJSON(data []byte) error { + errorTemplate := "Deserializing error: %v" + + props := map[string]interface{}{} + err := json.Unmarshal(data, &props) + if err != nil { + return err + } + + // deselialize metadata + e.OdataMetadata = 
stringFromMap(props, "odata.metadata") + e.OdataType = stringFromMap(props, "odata.type") + e.OdataID = stringFromMap(props, "odata.id") + e.OdataEtag = stringFromMap(props, "odata.etag") + e.OdataEditLink = stringFromMap(props, "odata.editLink") + e.PartitionKey = stringFromMap(props, partitionKeyNode) + e.RowKey = stringFromMap(props, rowKeyNode) + + // deserialize timestamp + timeStamp, ok := props["Timestamp"] + if ok { + str, ok := timeStamp.(string) + if !ok { + return fmt.Errorf(errorTemplate, "Timestamp casting error") + } + t, err := time.Parse(time.RFC3339Nano, str) + if err != nil { + return fmt.Errorf(errorTemplate, err) + } + e.TimeStamp = t + } + delete(props, "Timestamp") + delete(props, "Timestamp@odata.type") + + // deserialize entity (user defined fields) + for k, v := range props { + if strings.HasSuffix(k, OdataTypeSuffix) { + valueKey := strings.TrimSuffix(k, OdataTypeSuffix) + str, ok := props[valueKey].(string) + if !ok { + return fmt.Errorf(errorTemplate, fmt.Sprintf("%v casting error", v)) + } + switch v { + case OdataBinary: + props[valueKey], err = base64.StdEncoding.DecodeString(str) + if err != nil { + return fmt.Errorf(errorTemplate, err) + } + case OdataDateTime: + t, err := time.Parse("2006-01-02T15:04:05Z", str) + if err != nil { + return fmt.Errorf(errorTemplate, err) + } + props[valueKey] = t + case OdataGUID: + props[valueKey] = uuid.FromStringOrNil(str) + case OdataInt64: + i, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return fmt.Errorf(errorTemplate, err) + } + props[valueKey] = i + default: + return fmt.Errorf(errorTemplate, fmt.Sprintf("%v is not supported", v)) + } + delete(props, k) + } + } + + e.Properties = props + return nil +} + +func getAndDelete(props map[string]interface{}, key string) interface{} { + if value, ok := props[key]; ok { + delete(props, key) + return value + } + return nil +} + +func addIfMatchHeader(h map[string]string, force bool, etag string) map[string]string { + if force { + h[headerIfMatch] = "*" + } else { + h[headerIfMatch] = etag + } + return h +} + +// updates Etag and timestamp +func (e *Entity) updateEtagAndTimestamp(headers http.Header) error { + e.OdataEtag = headers.Get(headerEtag) + return e.updateTimestamp(headers) +} + +func (e *Entity) updateTimestamp(headers http.Header) error { + str := headers.Get(headerDate) + t, err := time.Parse(time.RFC1123, str) + if err != nil { + return fmt.Errorf("Update timestamp error: %v", err) + } + e.TimeStamp = t + return nil +} + +func (e *Entity) insertOr(verb string, options *EntityOptions) error { + query, headers := options.getParameters() + headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders()) + + body, err := json.Marshal(e) + if err != nil { + return err + } + headers = addBodyRelatedHeaders(headers, len(body)) + headers = addReturnContentHeaders(headers, EmptyPayload) + + uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query) + resp, err := e.Table.tsc.client.exec(verb, uri, headers, bytes.NewReader(body), e.Table.tsc.auth) + if err != nil { + return err + } + defer drainRespBody(resp) + + if err = checkRespCode(resp, []int{http.StatusNoContent}); err != nil { + return err + } + + return e.updateEtagAndTimestamp(resp.Header) +} + +func (e *Entity) updateMerge(force bool, verb string, options *EntityOptions) error { + query, headers := options.getParameters() + headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders()) + + body, err := json.Marshal(e) + if err != nil { + return err + } + headers = 
addBodyRelatedHeaders(headers, len(body)) + headers = addIfMatchHeader(headers, force, e.OdataEtag) + headers = addReturnContentHeaders(headers, EmptyPayload) + + uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query) + resp, err := e.Table.tsc.client.exec(verb, uri, headers, bytes.NewReader(body), e.Table.tsc.auth) + if err != nil { + if resp.StatusCode == http.StatusPreconditionFailed { + return fmt.Errorf(etagErrorTemplate, err) + } + return err + } + defer drainRespBody(resp) + + if err = checkRespCode(resp, []int{http.StatusNoContent}); err != nil { + return err + } + + return e.updateEtagAndTimestamp(resp.Header) +} + +func stringFromMap(props map[string]interface{}, key string) string { + value := getAndDelete(props, key) + if value != nil { + return value.(string) + } + return "" +} + +func (options *EntityOptions) getParameters() (url.Values, map[string]string) { + query := url.Values{} + headers := map[string]string{} + if options != nil { + query = addTimeout(query, options.Timeout) + headers = headersFromStruct(*options) + } + return query, headers +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go index e4901a11..06bbe4ba 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "errors" "fmt" @@ -8,11 +22,16 @@ import ( "net/http" "net/url" "strconv" + "sync" ) const fourMB = uint64(4194304) const oneTB = uint64(1099511627776) +// Export maximum range and file sizes +const MaxRangeSize = fourMB +const MaxFileSize = oneTB + // File represents a file on a share. type File struct { fsc *FileServiceClient @@ -22,6 +41,7 @@ type File struct { Properties FileProperties `xml:"Properties"` share *Share FileCopyProperties FileCopyState + mutex *sync.Mutex } // FileProperties contains various properties of a file. @@ -32,7 +52,7 @@ type FileProperties struct { Etag string Language string `header:"x-ms-content-language"` LastModified string - Length uint64 `xml:"Content-Length"` + Length uint64 `xml:"Content-Length" header:"x-ms-content-length"` MD5 string `header:"x-ms-content-md5"` Type string `header:"x-ms-content-type"` } @@ -54,26 +74,22 @@ type FileStream struct { } // FileRequestOptions will be passed to misc file operations. -// Currently just Timeout (in seconds) but will expand. +// Currently just Timeout (in seconds) but could expand. type FileRequestOptions struct { Timeout uint // timeout duration in seconds. } -// getParameters, construct parameters for FileRequestOptions. -// currently only timeout, but expecting to grow as functionality fills out. 
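
// The newly exported MaxRangeSize/MaxFileSize constants let callers
// size range operations without magic numbers; a chunking sketch that
// only computes the ranges (the write helper itself is assumed):
func chunkRanges(size uint64) []FileRange {
	var ranges []FileRange
	for off := uint64(0); off < size; off += MaxRangeSize {
		end := off + MaxRangeSize - 1
		if end >= size {
			end = size - 1
		}
		ranges = append(ranges, FileRange{Start: off, End: end})
	}
	return ranges
}
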
-func (p FileRequestOptions) getParameters() url.Values { - out := url.Values{} - - if p.Timeout != 0 { - out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) +func prepareOptions(options *FileRequestOptions) url.Values { + params := url.Values{} + if options != nil { + params = addTimeout(params, options.Timeout) } - - return out + return params } // FileRanges contains a list of file range information for a file. // -// See https://msdn.microsoft.com/en-us/library/azure/dn166984.aspx +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges type FileRanges struct { ContentLength uint64 LastModified string @@ -83,7 +99,7 @@ type FileRanges struct { // FileRange contains range information for a file. // -// See https://msdn.microsoft.com/en-us/library/azure/dn166984.aspx +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges type FileRange struct { Start uint64 `xml:"Start"` End uint64 `xml:"End"` @@ -100,9 +116,13 @@ func (f *File) buildPath() string { // ClearRange releases the specified range of space in a file. // -// See https://msdn.microsoft.com/en-us/library/azure/dn194276.aspx -func (f *File) ClearRange(fileRange FileRange) error { - headers, err := f.modifyRange(nil, fileRange, nil) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Range +func (f *File) ClearRange(fileRange FileRange, options *FileRequestOptions) error { + var timeout *uint + if options != nil { + timeout = &options.Timeout + } + headers, err := f.modifyRange(nil, fileRange, timeout, nil) if err != nil { return err } @@ -113,24 +133,23 @@ func (f *File) ClearRange(fileRange FileRange) error { // Create creates a new file or replaces an existing one. // -// See https://msdn.microsoft.com/en-us/library/azure/dn194271.aspx -func (f *File) Create(maxSize uint64) error { +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-File +func (f *File) Create(maxSize uint64, options *FileRequestOptions) error { if maxSize > oneTB { return fmt.Errorf("max file size is 1TB") } + params := prepareOptions(options) + headers := headersFromStruct(f.Properties) + headers["x-ms-content-length"] = strconv.FormatUint(maxSize, 10) + headers["x-ms-type"] = "file" - extraHeaders := map[string]string{ - "x-ms-content-length": strconv.FormatUint(maxSize, 10), - "x-ms-type": "file", - } - - headers, err := f.fsc.createResource(f.buildPath(), resourceFile, nil, mergeMDIntoExtraHeaders(f.Metadata, extraHeaders), []int{http.StatusCreated}) + outputHeaders, err := f.fsc.createResource(f.buildPath(), resourceFile, params, mergeMDIntoExtraHeaders(f.Metadata, headers), []int{http.StatusCreated}) if err != nil { return err } f.Properties.Length = maxSize - f.updateEtagAndLastModified(headers) + f.updateEtagAndLastModified(outputHeaders) return nil } @@ -142,70 +161,94 @@ func (f *File) CopyFile(sourceURL string, options *FileRequestOptions) error { "x-ms-type": "file", "x-ms-copy-source": sourceURL, } + params := prepareOptions(options) - var parameters url.Values - if options != nil { - parameters = options.getParameters() - } - - headers, err := f.fsc.createResource(f.buildPath(), resourceFile, parameters, mergeMDIntoExtraHeaders(f.Metadata, extraHeaders), []int{http.StatusAccepted}) + headers, err := f.fsc.createResource(f.buildPath(), resourceFile, params, mergeMDIntoExtraHeaders(f.Metadata, extraHeaders), []int{http.StatusAccepted}) if err != nil { return err } - f.updateEtagLastModifiedAndCopyHeaders(headers) + 
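Every rewritten method now funnels its optional parameters through prepareOptions, which tolerates a nil options pointer and delegates the timeout to addTimeout. A standalone sketch of the pattern, with local stand-in names:

```
package main

import (
	"fmt"
	"net/url"
	"strconv"
)

// addTimeoutParam mirrors the shape of the addTimeout helper assumed by
// prepareOptions above: a zero timeout adds nothing, anything else becomes
// a "timeout" query parameter in seconds.
func addTimeoutParam(params url.Values, timeout uint) url.Values {
	if timeout > 0 {
		params.Set("timeout", strconv.FormatUint(uint64(timeout), 10))
	}
	return params
}

type fileRequestOptions struct {
	Timeout uint
}

// prepare accepts nil, which is legal everywhere in the new API surface.
func prepare(options *fileRequestOptions) url.Values {
	params := url.Values{}
	if options != nil {
		params = addTimeoutParam(params, options.Timeout)
	}
	return params
}

func main() {
	fmt.Println(prepare(nil).Encode())                              // ""
	fmt.Println(prepare(&fileRequestOptions{Timeout: 30}).Encode()) // "timeout=30"
}
```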
f.updateEtagAndLastModified(headers) + f.FileCopyProperties.ID = headers.Get("X-Ms-Copy-Id") + f.FileCopyProperties.Status = headers.Get("X-Ms-Copy-Status") return nil } // Delete immediately removes this file from the storage account. // -// See https://msdn.microsoft.com/en-us/library/azure/dn689085.aspx -func (f *File) Delete() error { - return f.fsc.deleteResource(f.buildPath(), resourceFile) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-File2 +func (f *File) Delete(options *FileRequestOptions) error { + return f.fsc.deleteResource(f.buildPath(), resourceFile, options) } // DeleteIfExists removes this file if it exists. // -// See https://msdn.microsoft.com/en-us/library/azure/dn689085.aspx -func (f *File) DeleteIfExists() (bool, error) { - resp, err := f.fsc.deleteResourceNoClose(f.buildPath(), resourceFile) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-File2 +func (f *File) DeleteIfExists(options *FileRequestOptions) (bool, error) { + resp, err := f.fsc.deleteResourceNoClose(f.buildPath(), resourceFile, options) if resp != nil { - defer readAndCloseBody(resp.body) - if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { - return resp.statusCode == http.StatusAccepted, nil + defer drainRespBody(resp) + if resp.StatusCode == http.StatusAccepted || resp.StatusCode == http.StatusNotFound { + return resp.StatusCode == http.StatusAccepted, nil } } return false, err } +// GetFileOptions includes options for a get file operation +type GetFileOptions struct { + Timeout uint + GetContentMD5 bool +} + +// DownloadToStream operation downloads the file. +// +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file +func (f *File) DownloadToStream(options *FileRequestOptions) (io.ReadCloser, error) { + params := prepareOptions(options) + resp, err := f.fsc.getResourceNoClose(f.buildPath(), compNone, resourceFile, params, http.MethodGet, nil) + if err != nil { + return nil, err + } + + if err = checkRespCode(resp, []int{http.StatusOK}); err != nil { + drainRespBody(resp) + return nil, err + } + return resp.Body, nil +} + // DownloadRangeToStream operation downloads the specified range of this file with optional MD5 hash. 
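DeleteIfExists above folds a 404 into a plain boolean rather than an error, so callers don't have to special-case an already-deleted file. The status contract, extracted as a hedged sketch:

```
package main

import (
	"fmt"
	"net/http"
)

// deleteIfExists shows the status-code handling used by DeleteIfExists above:
// 202 means the resource was deleted, 404 means it was already gone, and both
// are reported as success; anything else surfaces the original error.
func deleteIfExists(resp *http.Response, err error) (bool, error) {
	if resp != nil {
		switch resp.StatusCode {
		case http.StatusAccepted:
			return true, nil
		case http.StatusNotFound:
			return false, nil
		}
	}
	return false, err
}

func main() {
	ok, err := deleteIfExists(&http.Response{StatusCode: http.StatusNotFound}, nil)
	fmt.Println(ok, err) // false <nil>
}
```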
// // See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file -func (f *File) DownloadRangeToStream(fileRange FileRange, getContentMD5 bool) (fs FileStream, err error) { - if getContentMD5 && isRangeTooBig(fileRange) { - return fs, fmt.Errorf("must specify a range less than or equal to 4MB when getContentMD5 is true") - } - +func (f *File) DownloadRangeToStream(fileRange FileRange, options *GetFileOptions) (fs FileStream, err error) { extraHeaders := map[string]string{ "Range": fileRange.String(), } - if getContentMD5 == true { - extraHeaders["x-ms-range-get-content-md5"] = "true" + params := url.Values{} + if options != nil { + if options.GetContentMD5 { + if isRangeTooBig(fileRange) { + return fs, fmt.Errorf("must specify a range less than or equal to 4MB when getContentMD5 is true") + } + extraHeaders["x-ms-range-get-content-md5"] = "true" + } + params = addTimeout(params, options.Timeout) } - resp, err := f.fsc.getResourceNoClose(f.buildPath(), compNone, resourceFile, http.MethodGet, extraHeaders) + resp, err := f.fsc.getResourceNoClose(f.buildPath(), compNone, resourceFile, params, http.MethodGet, extraHeaders) if err != nil { return fs, err } - if err = checkRespCode(resp.statusCode, []int{http.StatusOK, http.StatusPartialContent}); err != nil { - resp.body.Close() + if err = checkRespCode(resp, []int{http.StatusOK, http.StatusPartialContent}); err != nil { + drainRespBody(resp) return fs, err } - fs.Body = resp.body - if getContentMD5 { - fs.ContentMD5 = resp.headers.Get("Content-MD5") + fs.Body = resp.Body + if options != nil && options.GetContentMD5 { + fs.ContentMD5 = resp.Header.Get("Content-MD5") } return fs, nil } @@ -221,8 +264,10 @@ func (f *File) Exists() (bool, error) { } // FetchAttributes updates metadata and properties for this file. -func (f *File) FetchAttributes() error { - headers, err := f.fsc.getResourceHeaders(f.buildPath(), compNone, resourceFile, http.MethodHead) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file-properties +func (f *File) FetchAttributes(options *FileRequestOptions) error { + params := prepareOptions(options) + headers, err := f.fsc.getResourceHeaders(f.buildPath(), compNone, resourceFile, params, http.MethodHead) if err != nil { return err } @@ -242,17 +287,26 @@ func isRangeTooBig(fileRange FileRange) bool { return false } +// ListRangesOptions includes options for a list file ranges operation +type ListRangesOptions struct { + Timeout uint + ListRange *FileRange +} + // ListRanges returns the list of valid ranges for this file. 
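The 4MB guard above only applies when the caller asks the service to compute Content-MD5 for the range. A sketch of the size rule, assuming the conventional `bytes=start-end` rendering for the Range header (the exact output of FileRange.String() is not visible in this hunk):

```
package main

import "fmt"

const fourMB = uint64(4194304)

type fileRange struct {
	Start, End uint64
}

// rangeHeader renders the inclusive byte range in the conventional
// "bytes=start-end" form used by the Range request header.
func rangeHeader(r fileRange) string {
	return fmt.Sprintf("bytes=%d-%d", r.Start, r.End)
}

// tooBigForMD5 applies the service rule enforced above: Content-MD5 is only
// computed for ranges of at most 4 MB.
func tooBigForMD5(r fileRange) bool {
	return r.End-r.Start+1 > fourMB
}

func main() {
	r := fileRange{Start: 0, End: fourMB - 1} // exactly 4 MB, inclusive
	fmt.Println(rangeHeader(r), tooBigForMD5(r)) // bytes=0-4194303 false
}
```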
// -// See https://msdn.microsoft.com/en-us/library/azure/dn166984.aspx -func (f *File) ListRanges(listRange *FileRange) (*FileRanges, error) { +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges +func (f *File) ListRanges(options *ListRangesOptions) (*FileRanges, error) { params := url.Values{"comp": {"rangelist"}} // add optional range to list var headers map[string]string - if listRange != nil { - headers = make(map[string]string) - headers["Range"] = listRange.String() + if options != nil { + params = addTimeout(params, options.Timeout) + if options.ListRange != nil { + headers = make(map[string]string) + headers["Range"] = options.ListRange.String() + } } resp, err := f.fsc.listContent(f.buildPath(), params, headers) @@ -260,25 +314,25 @@ func (f *File) ListRanges(listRange *FileRange) (*FileRanges, error) { return nil, err } - defer resp.body.Close() + defer resp.Body.Close() var cl uint64 - cl, err = strconv.ParseUint(resp.headers.Get("x-ms-content-length"), 10, 64) + cl, err = strconv.ParseUint(resp.Header.Get("x-ms-content-length"), 10, 64) if err != nil { - ioutil.ReadAll(resp.body) + ioutil.ReadAll(resp.Body) return nil, err } var out FileRanges out.ContentLength = cl - out.ETag = resp.headers.Get("ETag") - out.LastModified = resp.headers.Get("Last-Modified") + out.ETag = resp.Header.Get("ETag") + out.LastModified = resp.Header.Get("Last-Modified") - err = xmlUnmarshal(resp.body, &out) + err = xmlUnmarshal(resp.Body, &out) return &out, err } // modifies a range of bytes in this file -func (f *File) modifyRange(bytes io.Reader, fileRange FileRange, contentMD5 *string) (http.Header, error) { +func (f *File) modifyRange(bytes io.Reader, fileRange FileRange, timeout *uint, contentMD5 *string) (http.Header, error) { if err := f.fsc.checkForStorageEmulator(); err != nil { return nil, err } @@ -289,7 +343,12 @@ func (f *File) modifyRange(bytes io.Reader, fileRange FileRange, contentMD5 *str return nil, errors.New("range cannot exceed 4MB in size") } - uri := f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), url.Values{"comp": {"range"}}) + params := url.Values{"comp": {"range"}} + if timeout != nil { + params = addTimeout(params, *timeout) + } + + uri := f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), params) // default to clear write := "clear" @@ -316,8 +375,8 @@ func (f *File) modifyRange(bytes io.Reader, fileRange FileRange, contentMD5 *str if err != nil { return nil, err } - defer readAndCloseBody(resp.body) - return resp.headers, checkRespCode(resp.statusCode, []int{http.StatusCreated}) + defer drainRespBody(resp) + return resp.Header, checkRespCode(resp, []int{http.StatusCreated}) } // SetMetadata replaces the metadata for this file. @@ -327,9 +386,9 @@ func (f *File) modifyRange(bytes io.Reader, fileRange FileRange, contentMD5 *str // are case-insensitive so case munging should not matter to other // applications either. 
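ListRanges pulls ContentLength, ETag and LastModified from the response headers and decodes the XML body into FileRanges. A self-contained decoding example with local copies of the types; the Ranges/Range element names are an assumption about the List Ranges payload, since the envelope's struct tags are not visible in this hunk:

```
package main

import (
	"encoding/xml"
	"fmt"
)

// Local copies of the response types used by ListRanges above, so the
// decoding step can be shown in isolation.
type fileRanges struct {
	XMLName  xml.Name    `xml:"Ranges"`
	FileList []fileRange `xml:"Range"`
}

type fileRange struct {
	Start uint64 `xml:"Start"`
	End   uint64 `xml:"End"`
}

func main() {
	body := `<?xml version="1.0" encoding="utf-8"?>
<Ranges>
  <Range><Start>0</Start><End>511</End></Range>
  <Range><Start>1024</Start><End>1535</End></Range>
</Ranges>`
	var out fileRanges
	if err := xml.Unmarshal([]byte(body), &out); err != nil {
		panic(err)
	}
	fmt.Println(out.FileList) // [{0 511} {1024 1535}]
}
```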
// -// See https://msdn.microsoft.com/en-us/library/azure/dn689097.aspx -func (f *File) SetMetadata() error { - headers, err := f.fsc.setResourceHeaders(f.buildPath(), compMetadata, resourceFile, mergeMDIntoExtraHeaders(f.Metadata, nil)) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-File-Metadata +func (f *File) SetMetadata(options *FileRequestOptions) error { + headers, err := f.fsc.setResourceHeaders(f.buildPath(), compMetadata, resourceFile, mergeMDIntoExtraHeaders(f.Metadata, nil), options) if err != nil { return err } @@ -345,9 +404,9 @@ func (f *File) SetMetadata() error { // are case-insensitive so case munging should not matter to other // applications either. // -// See https://msdn.microsoft.com/en-us/library/azure/dn166975.aspx -func (f *File) SetProperties() error { - headers, err := f.fsc.setResourceHeaders(f.buildPath(), compProperties, resourceFile, headersFromStruct(f.Properties)) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-File-Properties +func (f *File) SetProperties(options *FileRequestOptions) error { + headers, err := f.fsc.setResourceHeaders(f.buildPath(), compProperties, resourceFile, headersFromStruct(f.Properties), options) if err != nil { return err } @@ -362,14 +421,6 @@ func (f *File) updateEtagAndLastModified(headers http.Header) { f.Properties.LastModified = headers.Get("Last-Modified") } -// updates Etag, last modified date and x-ms-copy-id -func (f *File) updateEtagLastModifiedAndCopyHeaders(headers http.Header) { - f.Properties.Etag = headers.Get("Etag") - f.Properties.LastModified = headers.Get("Last-Modified") - f.FileCopyProperties.ID = headers.Get("X-Ms-Copy-Id") - f.FileCopyProperties.Status = headers.Get("X-Ms-Copy-Status") -} - // updates file properties from the specified HTTP header func (f *File) updateProperties(header http.Header) { size, err := strconv.ParseUint(header.Get("Content-Length"), 10, 64) @@ -390,23 +441,40 @@ func (f *File) updateProperties(header http.Header) { // This method does not create a publicly accessible URL if the file // is private and this method does not check if the file exists. func (f *File) URL() string { - return f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), url.Values{}) + return f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), nil) } -// WriteRange writes a range of bytes to this file with an optional MD5 hash of the content. -// Note that the length of bytes must match (rangeEnd - rangeStart) + 1 with a maximum size of 4MB. +// WriteRangeOptions includes options for a write file range operation +type WriteRangeOptions struct { + Timeout uint + ContentMD5 string +} + +// WriteRange writes a range of bytes to this file with an optional MD5 hash of the content (inside +// options parameter). Note that the length of bytes must match (rangeEnd - rangeStart) + 1 with +// a maximum size of 4MB. 
// -// See https://msdn.microsoft.com/en-us/library/azure/dn194276.aspx -func (f *File) WriteRange(bytes io.Reader, fileRange FileRange, contentMD5 *string) error { +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Range +func (f *File) WriteRange(bytes io.Reader, fileRange FileRange, options *WriteRangeOptions) error { if bytes == nil { return errors.New("bytes cannot be nil") } + var timeout *uint + var md5 *string + if options != nil { + timeout = &options.Timeout + md5 = &options.ContentMD5 + } - headers, err := f.modifyRange(bytes, fileRange, contentMD5) + headers, err := f.modifyRange(bytes, fileRange, timeout, md5) if err != nil { return err } - + // it's perfectly legal for multiple go routines to call WriteRange + // on the same *File (e.g. concurrently writing non-overlapping ranges) + // so we must take the file mutex before updating our properties. + f.mutex.Lock() f.updateEtagAndLastModified(headers) + f.mutex.Unlock() return nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go index d68bd7f6..1db8e7da 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go @@ -1,11 +1,25 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "encoding/xml" "fmt" "net/http" "net/url" - "strings" + "strconv" ) // FileServiceClient contains operations for Microsoft Azure File Service. @@ -17,7 +31,7 @@ type FileServiceClient struct { // ListSharesParameters defines the set of customizable parameters to make a // List Shares call. // -// See https://msdn.microsoft.com/en-us/library/azure/dn167009.aspx +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Shares type ListSharesParameters struct { Prefix string Marker string @@ -29,7 +43,7 @@ type ListSharesParameters struct { // ShareListResponse contains the response fields from // ListShares call. 
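The comment in WriteRange above explains the new sync.Mutex field on File: several goroutines may legitimately write non-overlapping ranges at once, and only the post-write property update touches shared state. A toy demonstration of that guarded update:

```
package main

import (
	"fmt"
	"sync"
)

// props mimics the File fields updated after each write; the mutex makes the
// read-modify-write of shared state safe when several goroutines call
// WriteRange concurrently on non-overlapping ranges.
type props struct {
	mu   sync.Mutex
	etag string
}

func (p *props) update(etag string) {
	p.mu.Lock()
	p.etag = etag
	p.mu.Unlock()
}

func main() {
	p := &props{}
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			p.update(fmt.Sprintf("etag-%d", i)) // stand-in for the server's ETag
		}(i)
	}
	wg.Wait()
	p.mu.Lock()
	fmt.Println("last etag:", p.etag)
	p.mu.Unlock()
}
```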
// -// See https://msdn.microsoft.com/en-us/library/azure/dn167009.aspx +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Shares type ShareListResponse struct { XMLName xml.Name `xml:"EnumerationResults"` Xmlns string `xml:"xmlns,attr"` @@ -79,10 +93,10 @@ func (p ListSharesParameters) getParameters() url.Values { out.Set("include", p.Include) } if p.MaxResults != 0 { - out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults)) + out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10)) } if p.Timeout != 0 { - out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) + out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10)) } return out @@ -91,15 +105,16 @@ func (p ListSharesParameters) getParameters() url.Values { func (p ListDirsAndFilesParameters) getParameters() url.Values { out := url.Values{} + if p.Prefix != "" { + out.Set("prefix", p.Prefix) + } if p.Marker != "" { out.Set("marker", p.Marker) } if p.MaxResults != 0 { - out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults)) - } - if p.Timeout != 0 { - out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) + out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10)) } + out = addTimeout(out, p.Timeout) return out } @@ -117,9 +132,9 @@ func getURLInitValues(comp compType, res resourceType) url.Values { } // GetShareReference returns a Share object for the specified share name. -func (f FileServiceClient) GetShareReference(name string) Share { - return Share{ - fsc: &f, +func (f *FileServiceClient) GetShareReference(name string) *Share { + return &Share{ + fsc: f, Name: name, Properties: ShareProperties{ Quota: -1, @@ -130,7 +145,7 @@ func (f FileServiceClient) GetShareReference(name string) Share { // ListShares returns the list of shares in a storage account along with // pagination token and other response details. 
// -// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/list-shares func (f FileServiceClient) ListShares(params ListSharesParameters) (*ShareListResponse, error) { q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}}) @@ -139,8 +154,8 @@ func (f FileServiceClient) ListShares(params ListSharesParameters) (*ShareListRe if err != nil { return nil, err } - defer resp.body.Close() - err = xmlUnmarshal(resp.body, &out) + defer resp.Body.Close() + err = xmlUnmarshal(resp.Body, &out) // assign our client to the newly created Share objects for i := range out.Shares { @@ -164,7 +179,7 @@ func (f *FileServiceClient) SetServiceProperties(props ServiceProperties) error } // retrieves directory or share content -func (f FileServiceClient) listContent(path string, params url.Values, extraHeaders map[string]string) (*storageResponse, error) { +func (f FileServiceClient) listContent(path string, params url.Values, extraHeaders map[string]string) (*http.Response, error) { if err := f.checkForStorageEmulator(); err != nil { return nil, err } @@ -178,8 +193,8 @@ func (f FileServiceClient) listContent(path string, params url.Values, extraHead return nil, err } - if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - readAndCloseBody(resp.body) + if err = checkRespCode(resp, []int{http.StatusOK}); err != nil { + drainRespBody(resp) return nil, err } @@ -197,9 +212,9 @@ func (f FileServiceClient) resourceExists(path string, res resourceType) (bool, resp, err := f.client.exec(http.MethodHead, uri, headers, nil, f.auth) if resp != nil { - defer readAndCloseBody(resp.body) - if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound { - return resp.statusCode == http.StatusOK, resp.headers, nil + defer drainRespBody(resp) + if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNotFound { + return resp.StatusCode == http.StatusOK, resp.Header, nil } } return false, nil, err @@ -211,12 +226,12 @@ func (f FileServiceClient) createResource(path string, res resourceType, urlPara if err != nil { return nil, err } - defer readAndCloseBody(resp.body) - return resp.headers, checkRespCode(resp.statusCode, expectedResponseCodes) + defer drainRespBody(resp) + return resp.Header, checkRespCode(resp, expectedResponseCodes) } // creates a resource depending on the specified resource type, doesn't close the response body -func (f FileServiceClient) createResourceNoClose(path string, res resourceType, urlParams url.Values, extraHeaders map[string]string) (*storageResponse, error) { +func (f FileServiceClient) createResourceNoClose(path string, res resourceType, urlParams url.Values, extraHeaders map[string]string) (*http.Response, error) { if err := f.checkForStorageEmulator(); err != nil { return nil, err } @@ -231,51 +246,50 @@ func (f FileServiceClient) createResourceNoClose(path string, res resourceType, } // returns HTTP header data for the specified directory or share -func (f FileServiceClient) getResourceHeaders(path string, comp compType, res resourceType, verb string) (http.Header, error) { - resp, err := f.getResourceNoClose(path, comp, res, verb, nil) +func (f FileServiceClient) getResourceHeaders(path string, comp compType, res resourceType, params url.Values, verb string) (http.Header, error) { + resp, err := f.getResourceNoClose(path, comp, res, params, verb, nil) if err != nil { return nil, err } - defer readAndCloseBody(resp.body) + defer 
drainRespBody(resp) - if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + if err = checkRespCode(resp, []int{http.StatusOK}); err != nil { return nil, err } - return resp.headers, nil + return resp.Header, nil } // gets the specified resource, doesn't close the response body -func (f FileServiceClient) getResourceNoClose(path string, comp compType, res resourceType, verb string, extraHeaders map[string]string) (*storageResponse, error) { +func (f FileServiceClient) getResourceNoClose(path string, comp compType, res resourceType, params url.Values, verb string, extraHeaders map[string]string) (*http.Response, error) { if err := f.checkForStorageEmulator(); err != nil { return nil, err } - params := getURLInitValues(comp, res) + params = mergeParams(params, getURLInitValues(comp, res)) uri := f.client.getEndpoint(fileServiceName, path, params) - extraHeaders = f.client.protectUserAgent(extraHeaders) headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) return f.client.exec(verb, uri, headers, nil, f.auth) } // deletes the resource and returns the response -func (f FileServiceClient) deleteResource(path string, res resourceType) error { - resp, err := f.deleteResourceNoClose(path, res) +func (f FileServiceClient) deleteResource(path string, res resourceType, options *FileRequestOptions) error { + resp, err := f.deleteResourceNoClose(path, res, options) if err != nil { return err } - defer readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) + defer drainRespBody(resp) + return checkRespCode(resp, []int{http.StatusAccepted}) } // deletes the resource and returns the response, doesn't close the response body -func (f FileServiceClient) deleteResourceNoClose(path string, res resourceType) (*storageResponse, error) { +func (f FileServiceClient) deleteResourceNoClose(path string, res resourceType, options *FileRequestOptions) (*http.Response, error) { if err := f.checkForStorageEmulator(); err != nil { return nil, err } - values := getURLInitValues(compNone, res) + values := mergeParams(getURLInitValues(compNone, res), prepareOptions(options)) uri := f.client.getEndpoint(fileServiceName, path, values) return f.client.exec(http.MethodDelete, uri, f.client.getStandardHeaders(), nil, f.auth) } @@ -294,21 +308,13 @@ func mergeMDIntoExtraHeaders(metadata, extraHeaders map[string]string) map[strin return extraHeaders } -// merges extraHeaders into headers and returns headers -func mergeHeaders(headers, extraHeaders map[string]string) map[string]string { - for k, v := range extraHeaders { - headers[k] = v - } - return headers -} - // sets extra header data for the specified resource -func (f FileServiceClient) setResourceHeaders(path string, comp compType, res resourceType, extraHeaders map[string]string) (http.Header, error) { +func (f FileServiceClient) setResourceHeaders(path string, comp compType, res resourceType, extraHeaders map[string]string, options *FileRequestOptions) (http.Header, error) { if err := f.checkForStorageEmulator(); err != nil { return nil, err } - params := getURLInitValues(comp, res) + params := mergeParams(getURLInitValues(comp, res), prepareOptions(options)) uri := f.client.getEndpoint(fileServiceName, path, params) extraHeaders = f.client.protectUserAgent(extraHeaders) headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) @@ -317,52 +323,9 @@ func (f FileServiceClient) setResourceHeaders(path string, comp compType, res re if err != nil { return nil, err } - defer 
readAndCloseBody(resp.body) + defer drainRespBody(resp) - return resp.headers, checkRespCode(resp.statusCode, []int{http.StatusOK}) -} - -// gets metadata for the specified resource -func (f FileServiceClient) getMetadata(path string, res resourceType) (map[string]string, error) { - if err := f.checkForStorageEmulator(); err != nil { - return nil, err - } - - headers, err := f.getResourceHeaders(path, compMetadata, res, http.MethodGet) - if err != nil { - return nil, err - } - - return getMetadataFromHeaders(headers), nil -} - -// returns a map of custom metadata values from the specified HTTP header -func getMetadataFromHeaders(header http.Header) map[string]string { - metadata := make(map[string]string) - for k, v := range header { - // Can't trust CanonicalHeaderKey() to munge case - // reliably. "_" is allowed in identifiers: - // https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx - // https://msdn.microsoft.com/library/aa664670(VS.71).aspx - // http://tools.ietf.org/html/rfc7230#section-3.2 - // ...but "_" is considered invalid by - // CanonicalMIMEHeaderKey in - // https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542 - // so k can be "X-Ms-Meta-Foo" or "x-ms-meta-foo_bar". - k = strings.ToLower(k) - if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) { - continue - } - // metadata["foo"] = content of the last X-Ms-Meta-Foo header - k = k[len(userDefinedMetadataHeaderPrefix):] - metadata[k] = v[len(v)-1] - } - - if len(metadata) == 0 { - return nil - } - - return metadata + return resp.Header, checkRespCode(resp, []int{http.StatusOK}) } //checkForStorageEmulator determines if the client is setup for use with diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go new file mode 100644 index 00000000..5b4a6514 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go @@ -0,0 +1,201 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "errors" + "net/http" + "net/url" + "strconv" + "time" +) + +// lease constants. +const ( + leaseHeaderPrefix = "x-ms-lease-" + headerLeaseID = "x-ms-lease-id" + leaseAction = "x-ms-lease-action" + leaseBreakPeriod = "x-ms-lease-break-period" + leaseDuration = "x-ms-lease-duration" + leaseProposedID = "x-ms-proposed-lease-id" + leaseTime = "x-ms-lease-time" + + acquireLease = "acquire" + renewLease = "renew" + changeLease = "change" + releaseLease = "release" + breakLease = "break" +) + +// leasePut is common PUT code for the various acquire/release/break etc functions. 
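getMetadata and getMetadataFromHeaders disappear from this file, yet the queue code later in this patch still calls getMetadataFromHeaders, so the helper has evidently moved to a shared file rather than been dropped. For reference, a standalone reconstruction of the removed behavior:

```
package main

import (
	"fmt"
	"net/http"
	"strings"
)

const metaPrefix = "x-ms-meta-"

// metadataFromHeaders reproduces the helper removed in this hunk: header keys
// are lower-cased (CanonicalHeaderKey cannot be trusted with "_" in names),
// the x-ms-meta- prefix is stripped, and the last value of each repeated
// header wins.
func metadataFromHeaders(header http.Header) map[string]string {
	metadata := map[string]string{}
	for k, v := range header {
		k = strings.ToLower(k)
		if len(v) == 0 || !strings.HasPrefix(k, metaPrefix) {
			continue
		}
		metadata[strings.TrimPrefix(k, metaPrefix)] = v[len(v)-1]
	}
	if len(metadata) == 0 {
		return nil
	}
	return metadata
}

func main() {
	h := http.Header{"X-Ms-Meta-Foo": {"a", "b"}, "Date": {"Mon, 02 Jan 2006"}}
	fmt.Println(metadataFromHeaders(h)) // map[foo:b]
}
```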
+func (b *Blob) leaseCommonPut(headers map[string]string, expectedStatus int, options *LeaseOptions) (http.Header, error) { + params := url.Values{"comp": {"lease"}} + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) + if err != nil { + return nil, err + } + defer drainRespBody(resp) + + if err := checkRespCode(resp, []int{expectedStatus}); err != nil { + return nil, err + } + + return resp.Header, nil +} + +// LeaseOptions includes options for all operations regarding leasing blobs +type LeaseOptions struct { + Timeout uint + Origin string `header:"Origin"` + IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + RequestID string `header:"x-ms-client-request-id"` +} + +// AcquireLease creates a lease for a blob +// returns leaseID acquired +// In API Versions starting on 2012-02-12, the minimum leaseTimeInSeconds is 15, the maximum +// non-infinite leaseTimeInSeconds is 60. To specify an infinite lease, provide the value -1. +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob +func (b *Blob) AcquireLease(leaseTimeInSeconds int, proposedLeaseID string, options *LeaseOptions) (returnedLeaseID string, err error) { + headers := b.Container.bsc.client.getStandardHeaders() + headers[leaseAction] = acquireLease + + if leaseTimeInSeconds == -1 { + // Do nothing, but don't trigger the following clauses. + } else if leaseTimeInSeconds > 60 || b.Container.bsc.client.apiVersion < "2012-02-12" { + leaseTimeInSeconds = 60 + } else if leaseTimeInSeconds < 15 { + leaseTimeInSeconds = 15 + } + + headers[leaseDuration] = strconv.Itoa(leaseTimeInSeconds) + + if proposedLeaseID != "" { + headers[leaseProposedID] = proposedLeaseID + } + + respHeaders, err := b.leaseCommonPut(headers, http.StatusCreated, options) + if err != nil { + return "", err + } + + returnedLeaseID = respHeaders.Get(http.CanonicalHeaderKey(headerLeaseID)) + + if returnedLeaseID != "" { + return returnedLeaseID, nil + } + + return "", errors.New("LeaseID not returned") +} + +// BreakLease breaks the lease for a blob +// Returns the timeout remaining in the lease in seconds +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob +func (b *Blob) BreakLease(options *LeaseOptions) (breakTimeout int, err error) { + headers := b.Container.bsc.client.getStandardHeaders() + headers[leaseAction] = breakLease + return b.breakLeaseCommon(headers, options) +} + +// BreakLeaseWithBreakPeriod breaks the lease for a blob +// breakPeriodInSeconds is used to determine how long until new lease can be created. 
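Rather than rejecting an out-of-range duration, AcquireLease above silently normalizes it. The clamping rules restated as a runnable sketch:

```
package main

import "fmt"

// clampLeaseDuration restates the normalization performed by AcquireLease
// above: -1 requests an infinite lease and passes through untouched, anything
// above 60 seconds (or any value on an API version older than 2012-02-12) is
// capped at 60, and anything under 15 is raised to 15.
func clampLeaseDuration(seconds int, apiVersion string) int {
	if seconds == -1 {
		return seconds
	}
	if seconds > 60 || apiVersion < "2012-02-12" {
		return 60
	}
	if seconds < 15 {
		return 15
	}
	return seconds
}

func main() {
	for _, s := range []int{-1, 5, 30, 3600} {
		fmt.Println(s, "->", clampLeaseDuration(s, "2016-05-31"))
	}
	// -1 -> -1, 5 -> 15, 30 -> 30, 3600 -> 60
}
```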
+// Returns the timeout remaining in the lease in seconds +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob +func (b *Blob) BreakLeaseWithBreakPeriod(breakPeriodInSeconds int, options *LeaseOptions) (breakTimeout int, err error) { + headers := b.Container.bsc.client.getStandardHeaders() + headers[leaseAction] = breakLease + headers[leaseBreakPeriod] = strconv.Itoa(breakPeriodInSeconds) + return b.breakLeaseCommon(headers, options) +} + +// breakLeaseCommon is common code for both version of BreakLease (with and without break period) +func (b *Blob) breakLeaseCommon(headers map[string]string, options *LeaseOptions) (breakTimeout int, err error) { + + respHeaders, err := b.leaseCommonPut(headers, http.StatusAccepted, options) + if err != nil { + return 0, err + } + + breakTimeoutStr := respHeaders.Get(http.CanonicalHeaderKey(leaseTime)) + if breakTimeoutStr != "" { + breakTimeout, err = strconv.Atoi(breakTimeoutStr) + if err != nil { + return 0, err + } + } + + return breakTimeout, nil +} + +// ChangeLease changes a lease ID for a blob +// Returns the new LeaseID acquired +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob +func (b *Blob) ChangeLease(currentLeaseID string, proposedLeaseID string, options *LeaseOptions) (newLeaseID string, err error) { + headers := b.Container.bsc.client.getStandardHeaders() + headers[leaseAction] = changeLease + headers[headerLeaseID] = currentLeaseID + headers[leaseProposedID] = proposedLeaseID + + respHeaders, err := b.leaseCommonPut(headers, http.StatusOK, options) + if err != nil { + return "", err + } + + newLeaseID = respHeaders.Get(http.CanonicalHeaderKey(headerLeaseID)) + if newLeaseID != "" { + return newLeaseID, nil + } + + return "", errors.New("LeaseID not returned") +} + +// ReleaseLease releases the lease for a blob +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob +func (b *Blob) ReleaseLease(currentLeaseID string, options *LeaseOptions) error { + headers := b.Container.bsc.client.getStandardHeaders() + headers[leaseAction] = releaseLease + headers[headerLeaseID] = currentLeaseID + + _, err := b.leaseCommonPut(headers, http.StatusOK, options) + if err != nil { + return err + } + + return nil +} + +// RenewLease renews the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx +func (b *Blob) RenewLease(currentLeaseID string, options *LeaseOptions) error { + headers := b.Container.bsc.client.getStandardHeaders() + headers[leaseAction] = renewLease + headers[headerLeaseID] = currentLeaseID + + _, err := b.leaseCommonPut(headers, http.StatusOK, options) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/message.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/message.go new file mode 100644 index 00000000..ce33dcb7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/message.go @@ -0,0 +1,170 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/xml" + "fmt" + "net/http" + "net/url" + "strconv" + "time" +) + +// Message represents an Azure message. +type Message struct { + Queue *Queue + Text string `xml:"MessageText"` + ID string `xml:"MessageId"` + Insertion TimeRFC1123 `xml:"InsertionTime"` + Expiration TimeRFC1123 `xml:"ExpirationTime"` + PopReceipt string `xml:"PopReceipt"` + NextVisible TimeRFC1123 `xml:"TimeNextVisible"` + DequeueCount int `xml:"DequeueCount"` +} + +func (m *Message) buildPath() string { + return fmt.Sprintf("%s/%s", m.Queue.buildPathMessages(), m.ID) +} + +// PutMessageOptions is the set of options that can be specified for the Put +// Message operation. A zero struct does not use any preferences for the request. +type PutMessageOptions struct { + Timeout uint + VisibilityTimeout int + MessageTTL int + RequestID string `header:"x-ms-client-request-id"` +} + +// Put operation adds a new message to the back of the message queue. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Message +func (m *Message) Put(options *PutMessageOptions) error { + query := url.Values{} + headers := m.Queue.qsc.client.getStandardHeaders() + + req := putMessageRequest{MessageText: m.Text} + body, nn, err := xmlMarshal(req) + if err != nil { + return err + } + headers["Content-Length"] = strconv.Itoa(nn) + + if options != nil { + if options.VisibilityTimeout != 0 { + query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout)) + } + if options.MessageTTL != 0 { + query.Set("messagettl", strconv.Itoa(options.MessageTTL)) + } + query = addTimeout(query, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + + uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.Queue.buildPathMessages(), query) + resp, err := m.Queue.qsc.client.exec(http.MethodPost, uri, headers, body, m.Queue.qsc.auth) + if err != nil { + return err + } + defer drainRespBody(resp) + err = checkRespCode(resp, []int{http.StatusCreated}) + if err != nil { + return err + } + err = xmlUnmarshal(resp.Body, m) + if err != nil { + return err + } + return nil +} + +// UpdateMessageOptions is the set of options that can be specified for the +// Update Message operation. A zero struct does not use any preferences for the request. +type UpdateMessageOptions struct { + Timeout uint + VisibilityTimeout int + RequestID string `header:"x-ms-client-request-id"` +} + +// Update operation updates the specified message. 
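Put serializes the message into a QueueMessage envelope through the SDK's xmlMarshal, which also reports the byte count used for Content-Length. A rough standard-library approximation of that step; whether xmlMarshal prepends the XML header is an assumption here:

```
package main

import (
	"encoding/xml"
	"fmt"
	"strconv"
)

// putMessageRequest mirrors the wrapper type defined at the bottom of this
// file; the payload and Content-Length computation below approximate what
// the SDK's xmlMarshal helper produces.
type putMessageRequest struct {
	XMLName     xml.Name `xml:"QueueMessage"`
	MessageText string   `xml:"MessageText"`
}

func main() {
	body, err := xml.Marshal(putMessageRequest{MessageText: "hello"})
	if err != nil {
		panic(err)
	}
	payload := append([]byte(xml.Header), body...)
	headers := map[string]string{"Content-Length": strconv.Itoa(len(payload))}
	fmt.Println(string(payload))
	fmt.Println(headers)
}
```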
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Update-Message +func (m *Message) Update(options *UpdateMessageOptions) error { + query := url.Values{} + if m.PopReceipt != "" { + query.Set("popreceipt", m.PopReceipt) + } + + headers := m.Queue.qsc.client.getStandardHeaders() + req := putMessageRequest{MessageText: m.Text} + body, nn, err := xmlMarshal(req) + if err != nil { + return err + } + headers["Content-Length"] = strconv.Itoa(nn) + + if options != nil { + if options.VisibilityTimeout != 0 { + query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout)) + } + query = addTimeout(query, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.buildPath(), query) + + resp, err := m.Queue.qsc.client.exec(http.MethodPut, uri, headers, body, m.Queue.qsc.auth) + if err != nil { + return err + } + defer drainRespBody(resp) + + m.PopReceipt = resp.Header.Get("x-ms-popreceipt") + nextTimeStr := resp.Header.Get("x-ms-time-next-visible") + if nextTimeStr != "" { + nextTime, err := time.Parse(time.RFC1123, nextTimeStr) + if err != nil { + return err + } + m.NextVisible = TimeRFC1123(nextTime) + } + + return checkRespCode(resp, []int{http.StatusNoContent}) +} + +// Delete operation deletes the specified message. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179347.aspx +func (m *Message) Delete(options *QueueServiceOptions) error { + params := url.Values{"popreceipt": {m.PopReceipt}} + headers := m.Queue.qsc.client.getStandardHeaders() + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.buildPath(), params) + + resp, err := m.Queue.qsc.client.exec(http.MethodDelete, uri, headers, nil, m.Queue.qsc.auth) + if err != nil { + return err + } + defer drainRespBody(resp) + return checkRespCode(resp, []int{http.StatusNoContent}) +} + +type putMessageRequest struct { + XMLName xml.Name `xml:"QueueMessage"` + MessageText string `xml:"MessageText"` +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go new file mode 100644 index 00000000..800adf12 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go @@ -0,0 +1,47 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// MetadataLevel determines if operations should return a payload, +// and its level of detail. 
+type MetadataLevel string + +// These constants are meant to help with OData supported operations +const ( + OdataTypeSuffix = "@odata.type" + + // Types + + OdataBinary = "Edm.Binary" + OdataDateTime = "Edm.DateTime" + OdataGUID = "Edm.Guid" + OdataInt64 = "Edm.Int64" + + // Query options + + OdataFilter = "$filter" + OdataOrderBy = "$orderby" + OdataTop = "$top" + OdataSkip = "$skip" + OdataCount = "$count" + OdataExpand = "$expand" + OdataSelect = "$select" + OdataSearch = "$search" + + EmptyPayload MetadataLevel = "" + NoMetadata MetadataLevel = "application/json;odata=nometadata" + MinimalMetadata MetadataLevel = "application/json;odata=minimalmetadata" + FullMetadata MetadataLevel = "application/json;odata=fullmetadata" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go new file mode 100644 index 00000000..7ffd6382 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go @@ -0,0 +1,203 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/xml" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "time" +) + +// GetPageRangesResponse contains the response fields from +// Get Page Ranges call. +// +// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx +type GetPageRangesResponse struct { + XMLName xml.Name `xml:"PageList"` + PageList []PageRange `xml:"PageRange"` +} + +// PageRange contains information about a page of a page blob from +// Get Pages Range call. +// +// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx +type PageRange struct { + Start int64 `xml:"Start"` + End int64 `xml:"End"` +} + +var ( + errBlobCopyAborted = errors.New("storage: blob copy is aborted") + errBlobCopyIDMismatch = errors.New("storage: blob copy id is a mismatch") +) + +// PutPageOptions includes the options for a put page operation +type PutPageOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + IfSequenceNumberLessThanOrEqualTo *int `header:"x-ms-if-sequence-number-le"` + IfSequenceNumberLessThan *int `header:"x-ms-if-sequence-number-lt"` + IfSequenceNumberEqualTo *int `header:"x-ms-if-sequence-number-eq"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + RequestID string `header:"x-ms-client-request-id"` +} + +// WriteRange writes a range of pages to a page blob. +// Ranges must be aligned with 512-byte boundaries and the chunk size must be +// a multiple of 512. 
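The query-option constants above are plain OData system options, so composing a table query is ordinary url.Values work. A purely illustrative example (the filter expression is hypothetical, and the literal keys correspond to OdataFilter, OdataTop and OdataSelect):

```
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Build an OData query string; Encode percent-escapes the "$" prefix.
	query := url.Values{}
	query.Set("$filter", "Age gt 30")
	query.Set("$top", "10")
	query.Set("$select", "Name,Age")
	fmt.Println(query.Encode())
	// %24filter=Age+gt+30&%24select=Name%2CAge&%24top=10
}
```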
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Page +func (b *Blob) WriteRange(blobRange BlobRange, bytes io.Reader, options *PutPageOptions) error { + if bytes == nil { + return errors.New("bytes cannot be nil") + } + return b.modifyRange(blobRange, bytes, options) +} + +// ClearRange clears the given range in a page blob. +// Ranges must be aligned with 512-byte boundaries and chunk must be of size +// multiplies by 512. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Page +func (b *Blob) ClearRange(blobRange BlobRange, options *PutPageOptions) error { + return b.modifyRange(blobRange, nil, options) +} + +func (b *Blob) modifyRange(blobRange BlobRange, bytes io.Reader, options *PutPageOptions) error { + if blobRange.End < blobRange.Start { + return errors.New("the value for rangeEnd must be greater than or equal to rangeStart") + } + if blobRange.Start%512 != 0 { + return errors.New("the value for rangeStart must be a multiple of 512") + } + if blobRange.End%512 != 511 { + return errors.New("the value for rangeEnd must be a multiple of 512 - 1") + } + + params := url.Values{"comp": {"page"}} + + // default to clear + write := "clear" + var cl uint64 + + // if bytes is not nil then this is an update operation + if bytes != nil { + write = "update" + cl = (blobRange.End - blobRange.Start) + 1 + } + + headers := b.Container.bsc.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypePage) + headers["x-ms-page-write"] = write + headers["x-ms-range"] = blobRange.String() + headers["Content-Length"] = fmt.Sprintf("%v", cl) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, bytes, b.Container.bsc.auth) + if err != nil { + return err + } + defer drainRespBody(resp) + return checkRespCode(resp, []int{http.StatusCreated}) +} + +// GetPageRangesOptions includes the options for a get page ranges operation +type GetPageRangesOptions struct { + Timeout uint + Snapshot *time.Time + PreviousSnapshot *time.Time + Range *BlobRange + LeaseID string `header:"x-ms-lease-id"` + RequestID string `header:"x-ms-client-request-id"` +} + +// GetPageRanges returns the list of valid page ranges for a page blob. 
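Page blob writes must cover whole 512-byte pages, which is exactly what the three checks at the top of modifyRange above enforce. The rule as a standalone sketch:

```
package main

import (
	"errors"
	"fmt"
)

// validatePageRange restates the alignment rules from modifyRange above: a
// page range starts on a 512-byte boundary and ends one byte short of the
// next one, so 0-511, 512-1023, ... are the only legal shapes.
func validatePageRange(start, end int64) error {
	switch {
	case end < start:
		return errors.New("rangeEnd must be greater than or equal to rangeStart")
	case start%512 != 0:
		return errors.New("rangeStart must be a multiple of 512")
	case end%512 != 511:
		return errors.New("rangeEnd must be a multiple of 512 - 1")
	}
	return nil
}

func main() {
	fmt.Println(validatePageRange(0, 511)) // <nil>
	fmt.Println(validatePageRange(0, 512)) // rangeEnd must be a multiple of 512 - 1
}
```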
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Page-Ranges +func (b *Blob) GetPageRanges(options *GetPageRangesOptions) (GetPageRangesResponse, error) { + params := url.Values{"comp": {"pagelist"}} + headers := b.Container.bsc.client.getStandardHeaders() + + if options != nil { + params = addTimeout(params, options.Timeout) + params = addSnapshot(params, options.Snapshot) + if options.PreviousSnapshot != nil { + params.Add("prevsnapshot", timeRFC3339Formatted(*options.PreviousSnapshot)) + } + if options.Range != nil { + headers["Range"] = options.Range.String() + } + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + var out GetPageRangesResponse + resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth) + if err != nil { + return out, err + } + defer drainRespBody(resp) + + if err = checkRespCode(resp, []int{http.StatusOK}); err != nil { + return out, err + } + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// PutPageBlob initializes an empty page blob with specified name and maximum +// size in bytes (size must be aligned to a 512-byte boundary). A page blob must +// be created using this method before writing pages. +// +// See CreateBlockBlobFromReader for more info on creating blobs. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob +func (b *Blob) PutPageBlob(options *PutBlobOptions) error { + if b.Properties.ContentLength%512 != 0 { + return errors.New("Content length must be aligned to a 512-byte boundary") + } + + params := url.Values{} + headers := b.Container.bsc.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypePage) + headers["x-ms-blob-content-length"] = fmt.Sprintf("%v", b.Properties.ContentLength) + headers["x-ms-blob-sequence-number"] = fmt.Sprintf("%v", b.Properties.SequenceNumber) + headers = mergeHeaders(headers, headersFromStruct(b.Properties)) + headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) + if err != nil { + return err + } + return b.respondCreation(resp, BlobTypePage) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go index 4031410a..55238ab1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go @@ -1,339 +1,436 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ import ( "encoding/xml" "fmt" + "io" "net/http" "net/url" "strconv" - "strings" + "time" ) const ( // casing is per Golang's http.Header canonicalizing the header names. - approximateMessagesCountHeader = "X-Ms-Approximate-Messages-Count" - userDefinedMetadataHeaderPrefix = "X-Ms-Meta-" + approximateMessagesCountHeader = "X-Ms-Approximate-Messages-Count" ) -func pathForQueue(queue string) string { return fmt.Sprintf("/%s", queue) } -func pathForQueueMessages(queue string) string { return fmt.Sprintf("/%s/messages", queue) } -func pathForMessage(queue, name string) string { return fmt.Sprintf("/%s/messages/%s", queue, name) } - -type putMessageRequest struct { - XMLName xml.Name `xml:"QueueMessage"` - MessageText string `xml:"MessageText"` +// QueueAccessPolicy represents each access policy in the queue ACL. +type QueueAccessPolicy struct { + ID string + StartTime time.Time + ExpiryTime time.Time + CanRead bool + CanAdd bool + CanUpdate bool + CanProcess bool } -// PutMessageParameters is the set of options can be specified for Put Messsage -// operation. A zero struct does not use any preferences for the request. -type PutMessageParameters struct { - VisibilityTimeout int - MessageTTL int +// QueuePermissions represents the queue ACLs. +type QueuePermissions struct { + AccessPolicies []QueueAccessPolicy } -func (p PutMessageParameters) getParameters() url.Values { - out := url.Values{} - if p.VisibilityTimeout != 0 { - out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout)) - } - if p.MessageTTL != 0 { - out.Set("messagettl", strconv.Itoa(p.MessageTTL)) - } - return out +// SetQueuePermissionOptions includes options for a set queue permissions operation +type SetQueuePermissionOptions struct { + Timeout uint + RequestID string `header:"x-ms-client-request-id"` } -// GetMessagesParameters is the set of options can be specified for Get -// Messsages operation. A zero struct does not use any preferences for the -// request. -type GetMessagesParameters struct { - NumOfMessages int - VisibilityTimeout int +// Queue represents an Azure queue. +type Queue struct { + qsc *QueueServiceClient + Name string + Metadata map[string]string + AproxMessageCount uint64 } -func (p GetMessagesParameters) getParameters() url.Values { - out := url.Values{} - if p.NumOfMessages != 0 { - out.Set("numofmessages", strconv.Itoa(p.NumOfMessages)) - } - if p.VisibilityTimeout != 0 { - out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout)) - } - return out +func (q *Queue) buildPath() string { + return fmt.Sprintf("/%s", q.Name) } -// PeekMessagesParameters is the set of options can be specified for Peek -// Messsage operation. A zero struct does not use any preferences for the -// request. -type PeekMessagesParameters struct { - NumOfMessages int +func (q *Queue) buildPathMessages() string { + return fmt.Sprintf("%s/messages", q.buildPath()) } -func (p PeekMessagesParameters) getParameters() url.Values { - out := url.Values{"peekonly": {"true"}} // Required for peek operation - if p.NumOfMessages != 0 { - out.Set("numofmessages", strconv.Itoa(p.NumOfMessages)) - } - return out +// QueueServiceOptions includes options for some queue service operations +type QueueServiceOptions struct { + Timeout uint + RequestID string `header:"x-ms-client-request-id"` } -// UpdateMessageParameters is the set of options can be specified for Update Messsage -// operation. A zero struct does not use any preferences for the request. 
-type UpdateMessageParameters struct { - PopReceipt string - VisibilityTimeout int -} - -func (p UpdateMessageParameters) getParameters() url.Values { - out := url.Values{} - if p.PopReceipt != "" { - out.Set("popreceipt", p.PopReceipt) - } - if p.VisibilityTimeout != 0 { - out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout)) - } - return out -} - -// GetMessagesResponse represents a response returned from Get Messages -// operation. -type GetMessagesResponse struct { - XMLName xml.Name `xml:"QueueMessagesList"` - QueueMessagesList []GetMessageResponse `xml:"QueueMessage"` -} - -// GetMessageResponse represents a QueueMessage object returned from Get -// Messages operation response. -type GetMessageResponse struct { - MessageID string `xml:"MessageId"` - InsertionTime string `xml:"InsertionTime"` - ExpirationTime string `xml:"ExpirationTime"` - PopReceipt string `xml:"PopReceipt"` - TimeNextVisible string `xml:"TimeNextVisible"` - DequeueCount int `xml:"DequeueCount"` - MessageText string `xml:"MessageText"` -} - -// PeekMessagesResponse represents a response returned from Get Messages -// operation. -type PeekMessagesResponse struct { - XMLName xml.Name `xml:"QueueMessagesList"` - QueueMessagesList []PeekMessageResponse `xml:"QueueMessage"` -} - -// PeekMessageResponse represents a QueueMessage object returned from Peek -// Messages operation response. -type PeekMessageResponse struct { - MessageID string `xml:"MessageId"` - InsertionTime string `xml:"InsertionTime"` - ExpirationTime string `xml:"ExpirationTime"` - DequeueCount int `xml:"DequeueCount"` - MessageText string `xml:"MessageText"` -} - -// QueueMetadataResponse represents user defined metadata and queue -// properties on a specific queue. +// Create operation creates a queue under the given account. // -// See https://msdn.microsoft.com/en-us/library/azure/dd179384.aspx -type QueueMetadataResponse struct { - ApproximateMessageCount int - UserDefinedMetadata map[string]string +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Queue4 +func (q *Queue) Create(options *QueueServiceOptions) error { + params := url.Values{} + headers := q.qsc.client.getStandardHeaders() + headers = q.qsc.client.addMetadataToHeaders(headers, q.Metadata) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params) + + resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, nil, q.qsc.auth) + if err != nil { + return err + } + defer drainRespBody(resp) + return checkRespCode(resp, []int{http.StatusCreated}) +} + +// Delete operation permanently deletes the specified queue. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Queue3 +func (q *Queue) Delete(options *QueueServiceOptions) error { + params := url.Values{} + headers := q.qsc.client.getStandardHeaders() + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params) + resp, err := q.qsc.client.exec(http.MethodDelete, uri, headers, nil, q.qsc.auth) + if err != nil { + return err + } + defer drainRespBody(resp) + return checkRespCode(resp, []int{http.StatusNoContent}) +} + +// Exists returns true if a queue with given name exists. 
+func (q *Queue) Exists() (bool, error) {
+	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), url.Values{"comp": {"metadata"}})
+	resp, err := q.qsc.client.exec(http.MethodGet, uri, q.qsc.client.getStandardHeaders(), nil, q.qsc.auth)
+	if resp != nil {
+		defer drainRespBody(resp)
+		if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNotFound {
+			return resp.StatusCode == http.StatusOK, nil
+		}
+		err = getErrorFromResponse(resp)
+	}
+	return false, err
 }
 
 // SetMetadata operation sets user-defined metadata on the specified queue.
 // Metadata is associated with the queue as name-value pairs.
 //
-// See https://msdn.microsoft.com/en-us/library/azure/dd179348.aspx
-func (c QueueServiceClient) SetMetadata(name string, metadata map[string]string) error {
-	uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": []string{"metadata"}})
-	metadata = c.client.protectUserAgent(metadata)
-	headers := c.client.getStandardHeaders()
-	for k, v := range metadata {
-		headers[userDefinedMetadataHeaderPrefix+k] = v
-	}
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Queue-Metadata
+func (q *Queue) SetMetadata(options *QueueServiceOptions) error {
+	params := url.Values{"comp": {"metadata"}}
+	headers := q.qsc.client.getStandardHeaders()
+	headers = q.qsc.client.addMetadataToHeaders(headers, q.Metadata)
 
-	resp, err := c.client.exec(http.MethodPut, uri, headers, nil, c.auth)
+	if options != nil {
+		params = addTimeout(params, options.Timeout)
+		headers = mergeHeaders(headers, headersFromStruct(*options))
+	}
+	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)
+
+	resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, nil, q.qsc.auth)
 	if err != nil {
 		return err
 	}
-	defer readAndCloseBody(resp.body)
-
-	return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
+	defer drainRespBody(resp)
+	return checkRespCode(resp, []int{http.StatusNoContent})
 }
 
 // GetMetadata operation retrieves user-defined metadata and queue
 // properties on the specified queue. Metadata is associated with
 // the queue as name-value pairs.
 //
-// See https://msdn.microsoft.com/en-us/library/azure/dd179384.aspx
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Queue-Metadata
 //
 // Because of the way Golang's http client (and http.Header in particular)
 // canonicalizes header names, the returned metadata names will always
 // be all lower case.
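+//
+// A short sketch of reading the values back (assuming a *Queue named q):
+//
+//	if err := q.GetMetadata(nil); err == nil {
+//		fmt.Println(q.AproxMessageCount, q.Metadata)
+//	}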
-func (c QueueServiceClient) GetMetadata(name string) (QueueMetadataResponse, error) {
-	qm := QueueMetadataResponse{}
-	qm.UserDefinedMetadata = make(map[string]string)
-	uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": []string{"metadata"}})
-	headers := c.client.getStandardHeaders()
-	resp, err := c.client.exec(http.MethodGet, uri, headers, nil, c.auth)
-	if err != nil {
-		return qm, err
-	}
-	defer readAndCloseBody(resp.body)
-
-	for k, v := range resp.headers {
-		if len(v) != 1 {
-			return qm, fmt.Errorf("Unexpected number of values (%d) in response header '%s'", len(v), k)
-		}
-
-		value := v[0]
-
-		if k == approximateMessagesCountHeader {
-			qm.ApproximateMessageCount, err = strconv.Atoi(value)
-			if err != nil {
-				return qm, fmt.Errorf("Unexpected value in response header '%s': '%s' ", k, value)
-			}
-		} else if strings.HasPrefix(k, userDefinedMetadataHeaderPrefix) {
-			name := strings.TrimPrefix(k, userDefinedMetadataHeaderPrefix)
-			qm.UserDefinedMetadata[strings.ToLower(name)] = value
-		}
-	}
-
-	return qm, checkRespCode(resp.statusCode, []int{http.StatusOK})
+func (q *Queue) GetMetadata(options *QueueServiceOptions) error {
+	params := url.Values{"comp": {"metadata"}}
+	headers := q.qsc.client.getStandardHeaders()
+
+	if options != nil {
+		params = addTimeout(params, options.Timeout)
+		headers = mergeHeaders(headers, headersFromStruct(*options))
+	}
+	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)
+
+	resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
+	if err != nil {
+		return err
+	}
+	defer drainRespBody(resp)
+
+	if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
+		return err
+	}
+
+	aproxMessagesStr := resp.Header.Get(http.CanonicalHeaderKey(approximateMessagesCountHeader))
+	if aproxMessagesStr != "" {
+		aproxMessages, err := strconv.ParseUint(aproxMessagesStr, 10, 64)
+		if err != nil {
+			return err
+		}
+		q.AproxMessageCount = aproxMessages
+	}
+
+	q.Metadata = getMetadataFromHeaders(resp.Header)
+	return nil
 }
 
-// CreateQueue operation creates a queue under the given account.
-//
-// See https://msdn.microsoft.com/en-us/library/azure/dd179342.aspx
-func (c QueueServiceClient) CreateQueue(name string) error {
-	uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{})
-	headers := c.client.getStandardHeaders()
-	resp, err := c.client.exec(http.MethodPut, uri, headers, nil, c.auth)
-	if err != nil {
-		return err
-	}
-	defer readAndCloseBody(resp.body)
-	return checkRespCode(resp.statusCode, []int{http.StatusCreated})
-}
+// GetMessageReference returns a message object with the specified text.
+func (q *Queue) GetMessageReference(text string) *Message {
+	return &Message{
+		Queue: q,
+		Text:  text,
+	}
+}
 
-// DeleteQueue operation permanently deletes the specified queue.
-//
-// See https://msdn.microsoft.com/en-us/library/azure/dd179436.aspx
-func (c QueueServiceClient) DeleteQueue(name string) error {
-	uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{})
-	resp, err := c.client.exec(http.MethodDelete, uri, c.client.getStandardHeaders(), nil, c.auth)
-	if err != nil {
-		return err
-	}
-	defer readAndCloseBody(resp.body)
-	return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
-}
+// GetMessagesOptions is the set of options that can be specified for the Get
+// Messages operation. A zero struct does not use any preferences for the
+// request.
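+//
+// For example (a sketch, assuming a *Queue named q):
+//
+//	msgs, err := q.GetMessages(&GetMessagesOptions{
+//		NumOfMessages:     5,
+//		VisibilityTimeout: 30,
+//	})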
+type GetMessagesOptions struct { + Timeout uint + NumOfMessages int + VisibilityTimeout int + RequestID string `header:"x-ms-client-request-id"` } -// QueueExists returns true if a queue with given name exists. -func (c QueueServiceClient) QueueExists(name string) (bool, error) { - uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": {"metadata"}}) - resp, err := c.client.exec(http.MethodGet, uri, c.client.getStandardHeaders(), nil, c.auth) - if resp != nil && (resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound) { - return resp.statusCode == http.StatusOK, nil - } - - return false, err -} - -// PutMessage operation adds a new message to the back of the message queue. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179346.aspx -func (c QueueServiceClient) PutMessage(queue string, message string, params PutMessageParameters) error { - uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters()) - req := putMessageRequest{MessageText: message} - body, nn, err := xmlMarshal(req) - if err != nil { - return err - } - headers := c.client.getStandardHeaders() - headers["Content-Length"] = strconv.Itoa(nn) - resp, err := c.client.exec(http.MethodPost, uri, headers, body, c.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// ClearMessages operation deletes all messages from the specified queue. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179454.aspx -func (c QueueServiceClient) ClearMessages(queue string) error { - uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), url.Values{}) - resp, err := c.client.exec(http.MethodDelete, uri, c.client.getStandardHeaders(), nil, c.auth) - if err != nil { - return err - } - defer readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) +type messages struct { + XMLName xml.Name `xml:"QueueMessagesList"` + Messages []Message `xml:"QueueMessage"` } // GetMessages operation retrieves one or more messages from the front of the // queue. 
 //
-// See https://msdn.microsoft.com/en-us/library/azure/dd179474.aspx
-func (c QueueServiceClient) GetMessages(queue string, params GetMessagesParameters) (GetMessagesResponse, error) {
-	var r GetMessagesResponse
-	uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters())
-	resp, err := c.client.exec(http.MethodGet, uri, c.client.getStandardHeaders(), nil, c.auth)
-	if err != nil {
-		return r, err
-	}
-	defer resp.body.Close()
-	err = xmlUnmarshal(resp.body, &r)
-	return r, err
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Messages
+func (q *Queue) GetMessages(options *GetMessagesOptions) ([]Message, error) {
+	query := url.Values{}
+	headers := q.qsc.client.getStandardHeaders()
+
+	if options != nil {
+		if options.NumOfMessages != 0 {
+			query.Set("numofmessages", strconv.Itoa(options.NumOfMessages))
+		}
+		if options.VisibilityTimeout != 0 {
+			query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout))
+		}
+		query = addTimeout(query, options.Timeout)
+		headers = mergeHeaders(headers, headersFromStruct(*options))
+	}
+	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), query)
+
+	resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
+	if err != nil {
+		return []Message{}, err
+	}
+	defer resp.Body.Close()
+
+	var out messages
+	err = xmlUnmarshal(resp.Body, &out)
+	if err != nil {
+		return []Message{}, err
+	}
+	for i := range out.Messages {
+		out.Messages[i].Queue = q
+	}
+	return out.Messages, nil
+}
+
+// PeekMessagesOptions is the set of options that can be specified for the
+// Peek Messages operation. A zero struct does not use any preferences for
+// the request.
+type PeekMessagesOptions struct {
+	Timeout       uint
+	NumOfMessages int
+	RequestID     string `header:"x-ms-client-request-id"`
 }
 
 // PeekMessages retrieves one or more messages from the front of the queue, but
 // does not alter the visibility of the message.
 //
-// See https://msdn.microsoft.com/en-us/library/azure/dd179472.aspx
-func (c QueueServiceClient) PeekMessages(queue string, params PeekMessagesParameters) (PeekMessagesResponse, error) {
-	var r PeekMessagesResponse
-	uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters())
-	resp, err := c.client.exec(http.MethodGet, uri, c.client.getStandardHeaders(), nil, c.auth)
-	if err != nil {
-		return r, err
-	}
-	defer resp.body.Close()
-	err = xmlUnmarshal(resp.body, &r)
-	return r, err
-}
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Peek-Messages
+func (q *Queue) PeekMessages(options *PeekMessagesOptions) ([]Message, error) {
+	query := url.Values{"peekonly": {"true"}} // Required for peek operation
+	headers := q.qsc.client.getStandardHeaders()
+
+	if options != nil {
+		if options.NumOfMessages != 0 {
+			query.Set("numofmessages", strconv.Itoa(options.NumOfMessages))
+		}
+		query = addTimeout(query, options.Timeout)
+		headers = mergeHeaders(headers, headersFromStruct(*options))
+	}
+	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), query)
+
+	resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
+	if err != nil {
+		return []Message{}, err
+	}
+	defer resp.Body.Close()
+
+	var out messages
+	err = xmlUnmarshal(resp.Body, &out)
+	if err != nil {
+		return []Message{}, err
+	}
+	for i := range out.Messages {
+		out.Messages[i].Queue = q
+	}
+	return out.Messages, nil
+}
 
-// DeleteMessage operation deletes the specified message.
+// ClearMessages operation deletes all messages from the specified queue. // -// See https://msdn.microsoft.com/en-us/library/azure/dd179347.aspx -func (c QueueServiceClient) DeleteMessage(queue, messageID, popReceipt string) error { - uri := c.client.getEndpoint(queueServiceName, pathForMessage(queue, messageID), url.Values{ - "popreceipt": {popReceipt}}) - resp, err := c.client.exec(http.MethodDelete, uri, c.client.getStandardHeaders(), nil, c.auth) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Clear-Messages +func (q *Queue) ClearMessages(options *QueueServiceOptions) error { + params := url.Values{} + headers := q.qsc.client.getStandardHeaders() + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), params) + + resp, err := q.qsc.client.exec(http.MethodDelete, uri, headers, nil, q.qsc.auth) if err != nil { return err } - defer readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) + defer drainRespBody(resp) + return checkRespCode(resp, []int{http.StatusNoContent}) } -// UpdateMessage operation deletes the specified message. -// -// See https://msdn.microsoft.com/en-us/library/azure/hh452234.aspx -func (c QueueServiceClient) UpdateMessage(queue string, messageID string, message string, params UpdateMessageParameters) error { - uri := c.client.getEndpoint(queueServiceName, pathForMessage(queue, messageID), params.getParameters()) - req := putMessageRequest{MessageText: message} - body, nn, err := xmlMarshal(req) +// SetPermissions sets up queue permissions +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-queue-acl +func (q *Queue) SetPermissions(permissions QueuePermissions, options *SetQueuePermissionOptions) error { + body, length, err := generateQueueACLpayload(permissions.AccessPolicies) if err != nil { return err } - headers := c.client.getStandardHeaders() - headers["Content-Length"] = fmt.Sprintf("%d", nn) - resp, err := c.client.exec(http.MethodPut, uri, headers, body, c.auth) + + params := url.Values{ + "comp": {"acl"}, + } + headers := q.qsc.client.getStandardHeaders() + headers["Content-Length"] = strconv.Itoa(length) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params) + resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, body, q.qsc.auth) if err != nil { return err } - defer readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) + defer drainRespBody(resp) + return checkRespCode(resp, []int{http.StatusNoContent}) +} + +func generateQueueACLpayload(policies []QueueAccessPolicy) (io.Reader, int, error) { + sil := SignedIdentifiers{ + SignedIdentifiers: []SignedIdentifier{}, + } + for _, qapd := range policies { + permission := qapd.generateQueuePermissions() + signedIdentifier := convertAccessPolicyToXMLStructs(qapd.ID, qapd.StartTime, qapd.ExpiryTime, permission) + sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier) + } + return xmlMarshal(sil) +} + +func (qapd *QueueAccessPolicy) generateQueuePermissions() (permissions string) { + // generate the permissions string (raup). + // still want the end user API to have bool flags. 
+	permissions = ""
+
+	if qapd.CanRead {
+		permissions += "r"
+	}
+
+	if qapd.CanAdd {
+		permissions += "a"
+	}
+
+	if qapd.CanUpdate {
+		permissions += "u"
+	}
+
+	if qapd.CanProcess {
+		permissions += "p"
+	}
+
+	return permissions
+}
+
+// GetQueuePermissionOptions includes options for a get queue permissions operation
+type GetQueuePermissionOptions struct {
+	Timeout   uint
+	RequestID string `header:"x-ms-client-request-id"`
+}
+
+// GetPermissions gets the queue permissions as per https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-queue-acl
+// If timeout is 0 then it will not be passed to Azure
+func (q *Queue) GetPermissions(options *GetQueuePermissionOptions) (*QueuePermissions, error) {
+	params := url.Values{
+		"comp": {"acl"},
+	}
+	headers := q.qsc.client.getStandardHeaders()
+
+	if options != nil {
+		params = addTimeout(params, options.Timeout)
+		headers = mergeHeaders(headers, headersFromStruct(*options))
+	}
+	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)
+	resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var ap AccessPolicy
+	err = xmlUnmarshal(resp.Body, &ap.SignedIdentifiersList)
+	if err != nil {
+		return nil, err
+	}
+	return buildQueueAccessPolicy(ap, &resp.Header), nil
+}
+
+func buildQueueAccessPolicy(ap AccessPolicy, headers *http.Header) *QueuePermissions {
+	permissions := QueuePermissions{
+		AccessPolicies: []QueueAccessPolicy{},
+	}
+
+	for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers {
+		qapd := QueueAccessPolicy{
+			ID:         policy.ID,
+			StartTime:  policy.AccessPolicy.StartTime,
+			ExpiryTime: policy.AccessPolicy.ExpiryTime,
+		}
+		qapd.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r")
+		qapd.CanAdd = updatePermissions(policy.AccessPolicy.Permission, "a")
+		qapd.CanUpdate = updatePermissions(policy.AccessPolicy.Permission, "u")
+		qapd.CanProcess = updatePermissions(policy.AccessPolicy.Permission, "p")
+
+		permissions.AccessPolicies = append(permissions.AccessPolicies, qapd)
+	}
+	return &permissions
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/queuesasuri.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/queuesasuri.go
new file mode 100644
index 00000000..28d9ab93
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/queuesasuri.go
@@ -0,0 +1,146 @@
+package storage
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"errors"
+	"fmt"
+	"net/url"
+	"strings"
+	"time"
+)
+
+// QueueSASOptions are options to construct a queue SAS
+// URI.
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
+type QueueSASOptions struct {
+	QueueSASPermissions
+	SASOptions
+}
+
+// QueueSASPermissions includes the available permissions for
+// a queue SAS URI.
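+//
+// A usage sketch with GetSASURI (assuming a *Queue named q; Expiry and
+// UseHTTPS come from the embedded SASOptions):
+//
+//	opts := QueueSASOptions{}
+//	opts.Read = true
+//	opts.Process = true
+//	opts.Expiry = time.Now().Add(time.Hour)
+//	opts.UseHTTPS = true
+//	sasURL, err := q.GetSASURI(opts)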
+type QueueSASPermissions struct {
+	Read    bool
+	Add     bool
+	Update  bool
+	Process bool
+}
+
+func (q QueueSASPermissions) buildString() string {
+	permissions := ""
+
+	if q.Read {
+		permissions += "r"
+	}
+	if q.Add {
+		permissions += "a"
+	}
+	if q.Update {
+		permissions += "u"
+	}
+	if q.Process {
+		permissions += "p"
+	}
+	return permissions
+}
+
+// GetSASURI creates a URL to the specified queue which contains the Shared
+// Access Signature with the specified permissions and expiration time.
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
+func (q *Queue) GetSASURI(options QueueSASOptions) (string, error) {
+	canonicalizedResource, err := q.qsc.client.buildCanonicalizedResource(q.buildPath(), q.qsc.auth, true)
+	if err != nil {
+		return "", err
+	}
+
+	// "The canonicalizedresource portion of the string is a canonical path to the signed resource.
+	// It must include the service name (blob, table, queue or file) for version 2015-02-21 or
+	// later, the storage account name, and the resource name, and must be URL-decoded."
+	// -- https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
+	// We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component).
+	canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1)
+	canonicalizedResource, err = url.QueryUnescape(canonicalizedResource)
+	if err != nil {
+		return "", err
+	}
+
+	signedStart := ""
+	if options.Start != (time.Time{}) {
+		signedStart = options.Start.UTC().Format(time.RFC3339)
+	}
+	signedExpiry := options.Expiry.UTC().Format(time.RFC3339)
+
+	protocols := "https,http"
+	if options.UseHTTPS {
+		protocols = "https"
+	}
+
+	permissions := options.QueueSASPermissions.buildString()
+	stringToSign, err := queueSASStringToSign(q.qsc.client.apiVersion, canonicalizedResource, signedStart, signedExpiry, options.IP, permissions, protocols, options.Identifier)
+	if err != nil {
+		return "", err
+	}
+
+	sig := q.qsc.client.computeHmac256(stringToSign)
+	sasParams := url.Values{
+		"sv":  {q.qsc.client.apiVersion},
+		"se":  {signedExpiry},
+		"sp":  {permissions},
+		"sig": {sig},
+	}
+
+	if q.qsc.client.apiVersion >= "2015-04-05" {
+		sasParams.Add("spr", protocols)
+		addQueryParameter(sasParams, "sip", options.IP)
+	}
+
+	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), nil)
+	sasURL, err := url.Parse(uri)
+	if err != nil {
+		return "", err
+	}
+	sasURL.RawQuery = sasParams.Encode()
+	return sasURL.String(), nil
+}
+
+func queueSASStringToSign(signedVersion, canonicalizedResource, signedStart, signedExpiry, signedIP, signedPermissions, protocols, signedIdentifier string) (string, error) {
+
+	if signedVersion >= "2015-02-21" {
+		canonicalizedResource = "/queue" + canonicalizedResource
+	}
+
+	// https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx#Anchor_12
+	if signedVersion >= "2015-04-05" {
+		return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s",
+			signedPermissions,
+			signedStart,
+			signedExpiry,
+			canonicalizedResource,
+			signedIdentifier,
+			signedIP,
+			protocols,
+			signedVersion), nil
+
+	}
+
+	// reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
+	if signedVersion >= "2013-08-15" {
+		return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion), nil
+	}
+
+	return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15")
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go
index c2614133..29febe14 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go
@@ -1,5 +1,19 @@
 package storage
 
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 // QueueServiceClient contains operations for Microsoft Azure Queue Storage
 // Service.
 type QueueServiceClient struct {
@@ -9,12 +23,20 @@ type QueueServiceClient struct {
 
 // GetServiceProperties gets the properties of your storage account's queue service.
 // See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-queue-service-properties
-func (c *QueueServiceClient) GetServiceProperties() (*ServiceProperties, error) {
-	return c.client.getServiceProperties(queueServiceName, c.auth)
+func (q *QueueServiceClient) GetServiceProperties() (*ServiceProperties, error) {
+	return q.client.getServiceProperties(queueServiceName, q.auth)
 }
 
 // SetServiceProperties sets the properties of your storage account's queue service.
 // See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-queue-service-properties
-func (c *QueueServiceClient) SetServiceProperties(props ServiceProperties) error {
-	return c.client.setServiceProperties(props, queueServiceName, c.auth)
+func (q *QueueServiceClient) SetServiceProperties(props ServiceProperties) error {
+	return q.client.setServiceProperties(props, queueServiceName, q.auth)
+}
+
+// GetQueueReference returns a Queue object for the specified queue name.
+func (q *QueueServiceClient) GetQueueReference(name string) *Queue {
+	return &Queue{
+		qsc:  q,
+		Name: name,
+	}
 }
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/share.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/share.go
index e190097e..cf75a265 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/storage/share.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/share.go
@@ -1,5 +1,19 @@
 package storage
 
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 import (
 	"fmt"
 	"net/http"
@@ -30,9 +44,15 @@ func (s *Share) buildPath() string {
 
 // Create this share under the associated account.
 // If a share with the same name already exists, the operation fails.
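+//
+// A minimal sketch (assuming a FileServiceClient named fsc with a
+// GetShareReference helper):
+//
+//	share := fsc.GetShareReference("myshare")
+//	share.Properties.Quota = 10 // quota in GB
+//	err := share.Create(nil)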
// -// See https://msdn.microsoft.com/en-us/library/azure/dn167008.aspx -func (s *Share) Create() error { - headers, err := s.fsc.createResource(s.buildPath(), resourceShare, nil, mergeMDIntoExtraHeaders(s.Metadata, nil), []int{http.StatusCreated}) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Share +func (s *Share) Create(options *FileRequestOptions) error { + extraheaders := map[string]string{} + if s.Properties.Quota > 0 { + extraheaders["x-ms-share-quota"] = strconv.Itoa(s.Properties.Quota) + } + + params := prepareOptions(options) + headers, err := s.fsc.createResource(s.buildPath(), resourceShare, params, mergeMDIntoExtraHeaders(s.Metadata, extraheaders), []int{http.StatusCreated}) if err != nil { return err } @@ -45,17 +65,23 @@ func (s *Share) Create() error { // it does not exist. Returns true if the share is newly created or false if // the share already exists. // -// See https://msdn.microsoft.com/en-us/library/azure/dn167008.aspx -func (s *Share) CreateIfNotExists() (bool, error) { - resp, err := s.fsc.createResourceNoClose(s.buildPath(), resourceShare, nil, nil) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Share +func (s *Share) CreateIfNotExists(options *FileRequestOptions) (bool, error) { + extraheaders := map[string]string{} + if s.Properties.Quota > 0 { + extraheaders["x-ms-share-quota"] = strconv.Itoa(s.Properties.Quota) + } + + params := prepareOptions(options) + resp, err := s.fsc.createResourceNoClose(s.buildPath(), resourceShare, params, extraheaders) if resp != nil { - defer readAndCloseBody(resp.body) - if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict { - if resp.statusCode == http.StatusCreated { - s.updateEtagAndLastModified(resp.headers) + defer drainRespBody(resp) + if resp.StatusCode == http.StatusCreated || resp.StatusCode == http.StatusConflict { + if resp.StatusCode == http.StatusCreated { + s.updateEtagAndLastModified(resp.Header) return true, nil } - return false, s.FetchAttributes() + return false, s.FetchAttributes(nil) } } @@ -66,20 +92,20 @@ func (s *Share) CreateIfNotExists() (bool, error) { // and directories contained within it are later deleted during garbage // collection. If the share does not exist the operation fails // -// See https://msdn.microsoft.com/en-us/library/azure/dn689090.aspx -func (s *Share) Delete() error { - return s.fsc.deleteResource(s.buildPath(), resourceShare) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Share +func (s *Share) Delete(options *FileRequestOptions) error { + return s.fsc.deleteResource(s.buildPath(), resourceShare, options) } // DeleteIfExists operation marks this share for deletion if it exists. 
// -// See https://msdn.microsoft.com/en-us/library/azure/dn689090.aspx -func (s *Share) DeleteIfExists() (bool, error) { - resp, err := s.fsc.deleteResourceNoClose(s.buildPath(), resourceShare) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Share +func (s *Share) DeleteIfExists(options *FileRequestOptions) (bool, error) { + resp, err := s.fsc.deleteResourceNoClose(s.buildPath(), resourceShare, options) if resp != nil { - defer readAndCloseBody(resp.body) - if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { - return resp.statusCode == http.StatusAccepted, nil + defer drainRespBody(resp) + if resp.StatusCode == http.StatusAccepted || resp.StatusCode == http.StatusNotFound { + return resp.StatusCode == http.StatusAccepted, nil } } return false, err @@ -97,8 +123,10 @@ func (s *Share) Exists() (bool, error) { } // FetchAttributes retrieves metadata and properties for this share. -func (s *Share) FetchAttributes() error { - headers, err := s.fsc.getResourceHeaders(s.buildPath(), compNone, resourceShare, http.MethodHead) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-share-properties +func (s *Share) FetchAttributes(options *FileRequestOptions) error { + params := prepareOptions(options) + headers, err := s.fsc.getResourceHeaders(s.buildPath(), compNone, resourceShare, params, http.MethodHead) if err != nil { return err } @@ -130,9 +158,9 @@ func (s *Share) ServiceClient() *FileServiceClient { // are case-insensitive so case munging should not matter to other // applications either. // -// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx -func (s *Share) SetMetadata() error { - headers, err := s.fsc.setResourceHeaders(s.buildPath(), compMetadata, resourceShare, mergeMDIntoExtraHeaders(s.Metadata, nil)) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-share-metadata +func (s *Share) SetMetadata(options *FileRequestOptions) error { + headers, err := s.fsc.setResourceHeaders(s.buildPath(), compMetadata, resourceShare, mergeMDIntoExtraHeaders(s.Metadata, nil), options) if err != nil { return err } @@ -148,15 +176,17 @@ func (s *Share) SetMetadata() error { // are case-insensitive so case munging should not matter to other // applications either. 
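+//
+// For example (a sketch; the quota is in GB, valid range [1, 5120]):
+//
+//	s.Properties.Quota = 100
+//	err := s.SetProperties(nil)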
// -// See https://msdn.microsoft.com/en-us/library/azure/mt427368.aspx -func (s *Share) SetProperties() error { - if s.Properties.Quota < 1 || s.Properties.Quota > 5120 { - return fmt.Errorf("invalid value %v for quota, valid values are [1, 5120]", s.Properties.Quota) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Share-Properties +func (s *Share) SetProperties(options *FileRequestOptions) error { + extraheaders := map[string]string{} + if s.Properties.Quota > 0 { + if s.Properties.Quota > 5120 { + return fmt.Errorf("invalid value %v for quota, valid values are [1, 5120]", s.Properties.Quota) + } + extraheaders["x-ms-share-quota"] = strconv.Itoa(s.Properties.Quota) } - headers, err := s.fsc.setResourceHeaders(s.buildPath(), compProperties, resourceShare, map[string]string{ - "x-ms-share-quota": strconv.Itoa(s.Properties.Quota), - }) + headers, err := s.fsc.setResourceHeaders(s.buildPath(), compProperties, resourceShare, extraheaders, options) if err != nil { return err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go index bee1c31a..056ab398 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "strings" "time" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go index 817560b7..c338975a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go @@ -1,9 +1,23 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ import ( - "fmt" "net/http" "net/url" + "strconv" ) // ServiceProperties represents the storage account service properties @@ -63,14 +77,14 @@ func (c Client) getServiceProperties(service string, auth authentication) (*Serv if err != nil { return nil, err } - defer resp.body.Close() + defer resp.Body.Close() - if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + if err := checkRespCode(resp, []int{http.StatusOK}); err != nil { return nil, err } var out ServiceProperties - err = xmlUnmarshal(resp.body, &out) + err = xmlUnmarshal(resp.Body, &out) if err != nil { return nil, err } @@ -106,13 +120,12 @@ func (c Client) setServiceProperties(props ServiceProperties, service string, au } headers := c.getStandardHeaders() - headers["Content-Length"] = fmt.Sprintf("%v", length) + headers["Content-Length"] = strconv.Itoa(length) resp, err := c.exec(http.MethodPut, uri, headers, body, auth) if err != nil { return err } - defer readAndCloseBody(resp.body) - - return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) + defer drainRespBody(resp) + return checkRespCode(resp, []int{http.StatusAccepted}) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go index 4123746e..22d9b4f5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "bytes" "encoding/json" @@ -9,20 +23,19 @@ import ( "net/http" "net/url" "strconv" + "strings" "time" ) -// AzureTable is the typedef of the Azure Table name -type AzureTable string - const ( - tablesURIPath = "/Tables" + tablesURIPath = "/Tables" + nextTableQueryParameter = "NextTableName" + headerNextPartitionKey = "x-ms-continuation-NextPartitionKey" + headerNextRowKey = "x-ms-continuation-NextRowKey" + nextPartitionKeyQueryParameter = "NextPartitionKey" + nextRowKeyQueryParameter = "NextRowKey" ) -type createTableRequest struct { - TableName string `json:"TableName"` -} - // TableAccessPolicy are used for SETTING table policies type TableAccessPolicy struct { ID string @@ -34,140 +47,231 @@ type TableAccessPolicy struct { CanDelete bool } -func pathForTable(table AzureTable) string { return fmt.Sprintf("%s", table) } - -func (c *TableServiceClient) getStandardHeaders() map[string]string { - return map[string]string{ - "x-ms-version": "2015-02-21", - "x-ms-date": currentTimeRfc1123Formatted(), - "Accept": "application/json;odata=nometadata", - "Accept-Charset": "UTF-8", - "Content-Type": "application/json", - userAgentHeader: c.client.userAgent, - } +// Table represents an Azure table. 
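+//
+// A sketch of obtaining and creating a table (GetTableService on the storage
+// Client and GetTableReference on TableServiceClient are assumed here):
+//
+//	tsc := client.GetTableService()
+//	t := tsc.GetTableReference("mytable")
+//	err := t.Create(30, EmptyPayload, nil)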
+type Table struct { + tsc *TableServiceClient + Name string `json:"TableName"` + OdataEditLink string `json:"odata.editLink"` + OdataID string `json:"odata.id"` + OdataMetadata string `json:"odata.metadata"` + OdataType string `json:"odata.type"` } -// QueryTables returns the tables created in the -// *TableServiceClient storage account. -func (c *TableServiceClient) QueryTables() ([]AzureTable, error) { - uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{}) +// EntityQueryResult contains the response from +// ExecuteQuery and ExecuteQueryNextResults functions. +type EntityQueryResult struct { + OdataMetadata string `json:"odata.metadata"` + Entities []*Entity `json:"value"` + QueryNextLink + table *Table +} - headers := c.getStandardHeaders() - headers["Content-Length"] = "0" +type continuationToken struct { + NextPartitionKey string + NextRowKey string +} - resp, err := c.client.execInternalJSON(http.MethodGet, uri, headers, nil, c.auth) +func (t *Table) buildPath() string { + return fmt.Sprintf("/%s", t.Name) +} + +func (t *Table) buildSpecificPath() string { + return fmt.Sprintf("%s('%s')", tablesURIPath, t.Name) +} + +// Get gets the referenced table. +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/querying-tables-and-entities +func (t *Table) Get(timeout uint, ml MetadataLevel) error { + if ml == EmptyPayload { + return errEmptyPayload + } + + query := url.Values{ + "timeout": {strconv.FormatUint(uint64(timeout), 10)}, + } + headers := t.tsc.client.getStandardHeaders() + headers[headerAccept] = string(ml) + + uri := t.tsc.client.getEndpoint(tableServiceName, t.buildSpecificPath(), query) + resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth) if err != nil { - return nil, err + return err } - defer resp.body.Close() + defer resp.Body.Close() - if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - ioutil.ReadAll(resp.body) - return nil, err + if err = checkRespCode(resp, []int{http.StatusOK}); err != nil { + return err } - buf := new(bytes.Buffer) - if _, err := buf.ReadFrom(resp.body); err != nil { - return nil, err + respBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err } - - var respArray queryTablesResponse - if err := json.Unmarshal(buf.Bytes(), &respArray); err != nil { - return nil, err + err = json.Unmarshal(respBody, t) + if err != nil { + return err } - - s := make([]AzureTable, len(respArray.TableName)) - for i, elem := range respArray.TableName { - s[i] = AzureTable(elem.TableName) - } - - return s, nil + return nil } -// CreateTable creates the table given the specific -// name. This function fails if the name is not compliant +// Create creates the referenced table. +// This function fails if the name is not compliant // with the specification or the tables already exists. -func (c *TableServiceClient) CreateTable(table AzureTable) error { - uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{}) +// ml determines the level of detail of metadata in the operation response, +// or no data at all. 
+// See https://docs.microsoft.com/rest/api/storageservices/fileservices/create-table +func (t *Table) Create(timeout uint, ml MetadataLevel, options *TableOptions) error { + uri := t.tsc.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{ + "timeout": {strconv.FormatUint(uint64(timeout), 10)}, + }) - headers := c.getStandardHeaders() - - req := createTableRequest{TableName: string(table)} + type createTableRequest struct { + TableName string `json:"TableName"` + } + req := createTableRequest{TableName: t.Name} buf := new(bytes.Buffer) - if err := json.NewEncoder(buf).Encode(req); err != nil { return err } - headers["Content-Length"] = fmt.Sprintf("%d", buf.Len()) - - resp, err := c.client.execInternalJSON(http.MethodPost, uri, headers, buf, c.auth) + headers := t.tsc.client.getStandardHeaders() + headers = addReturnContentHeaders(headers, ml) + headers = addBodyRelatedHeaders(headers, buf.Len()) + headers = options.addToHeaders(headers) + resp, err := t.tsc.client.exec(http.MethodPost, uri, headers, buf, t.tsc.auth) if err != nil { return err } - defer readAndCloseBody(resp.body) + defer resp.Body.Close() - if err := checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil { - return err + if ml == EmptyPayload { + if err := checkRespCode(resp, []int{http.StatusNoContent}); err != nil { + return err + } + } else { + if err := checkRespCode(resp, []int{http.StatusCreated}); err != nil { + return err + } + } + + if ml != EmptyPayload { + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + err = json.Unmarshal(data, t) + if err != nil { + return err + } } return nil } -// DeleteTable deletes the table given the specific -// name. This function fails if the table is not present. -// Be advised: DeleteTable deletes all the entries -// that may be present. -func (c *TableServiceClient) DeleteTable(table AzureTable) error { - uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{}) - uri += fmt.Sprintf("('%s')", string(table)) +// Delete deletes the referenced table. +// This function fails if the table is not present. +// Be advised: Delete deletes all the entries that may be present. +// See https://docs.microsoft.com/rest/api/storageservices/fileservices/delete-table +func (t *Table) Delete(timeout uint, options *TableOptions) error { + uri := t.tsc.client.getEndpoint(tableServiceName, t.buildSpecificPath(), url.Values{ + "timeout": {strconv.Itoa(int(timeout))}, + }) - headers := c.getStandardHeaders() - - headers["Content-Length"] = "0" - - resp, err := c.client.execInternalJSON(http.MethodDelete, uri, headers, nil, c.auth) + headers := t.tsc.client.getStandardHeaders() + headers = addReturnContentHeaders(headers, EmptyPayload) + headers = options.addToHeaders(headers) + resp, err := t.tsc.client.exec(http.MethodDelete, uri, headers, nil, t.tsc.auth) if err != nil { return err } - defer readAndCloseBody(resp.body) + defer drainRespBody(resp) - if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { - return err - - } - return nil + return checkRespCode(resp, []int{http.StatusNoContent}) } -// SetTablePermissions sets up table ACL permissions as per REST details https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Table-ACL -func (c *TableServiceClient) SetTablePermissions(table AzureTable, policies []TableAccessPolicy, timeout uint) (err error) { - params := url.Values{"comp": {"acl"}} +// QueryOptions includes options for a query entities operation. 
+// Top, filter and select are OData query options. +type QueryOptions struct { + Top uint + Filter string + Select []string + RequestID string +} - if timeout > 0 { - params.Add("timeout", fmt.Sprint(timeout)) +func (options *QueryOptions) getParameters() (url.Values, map[string]string) { + query := url.Values{} + headers := map[string]string{} + if options != nil { + if options.Top > 0 { + query.Add(OdataTop, strconv.FormatUint(uint64(options.Top), 10)) + } + if options.Filter != "" { + query.Add(OdataFilter, options.Filter) + } + if len(options.Select) > 0 { + query.Add(OdataSelect, strings.Join(options.Select, ",")) + } + headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID) + } + return query, headers +} + +// QueryEntities returns the entities in the table. +// You can use query options defined by the OData Protocol specification. +// +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities +func (t *Table) QueryEntities(timeout uint, ml MetadataLevel, options *QueryOptions) (*EntityQueryResult, error) { + if ml == EmptyPayload { + return nil, errEmptyPayload + } + query, headers := options.getParameters() + query = addTimeout(query, timeout) + uri := t.tsc.client.getEndpoint(tableServiceName, t.buildPath(), query) + return t.queryEntities(uri, headers, ml) +} + +// NextResults returns the next page of results +// from a QueryEntities or NextResults operation. +// +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities +// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-timeout-and-pagination +func (eqr *EntityQueryResult) NextResults(options *TableOptions) (*EntityQueryResult, error) { + if eqr == nil { + return nil, errNilPreviousResult + } + if eqr.NextLink == nil { + return nil, errNilNextLink + } + headers := options.addToHeaders(map[string]string{}) + return eqr.table.queryEntities(*eqr.NextLink, headers, eqr.ml) +} + +// SetPermissions sets up table ACL permissions +// See https://docs.microsoft.com/rest/api/storageservices/fileservices/Set-Table-ACL +func (t *Table) SetPermissions(tap []TableAccessPolicy, timeout uint, options *TableOptions) error { + params := url.Values{"comp": {"acl"}, + "timeout": {strconv.Itoa(int(timeout))}, } - uri := c.client.getEndpoint(tableServiceName, string(table), params) - headers := c.client.getStandardHeaders() + uri := t.tsc.client.getEndpoint(tableServiceName, t.Name, params) + headers := t.tsc.client.getStandardHeaders() + headers = options.addToHeaders(headers) - body, length, err := generateTableACLPayload(policies) + body, length, err := generateTableACLPayload(tap) if err != nil { return err } - headers["Content-Length"] = fmt.Sprintf("%v", length) + headers["Content-Length"] = strconv.Itoa(length) - resp, err := c.client.execInternalJSON(http.MethodPut, uri, headers, body, c.auth) + resp, err := t.tsc.client.exec(http.MethodPut, uri, headers, body, t.tsc.auth) if err != nil { return err } - defer readAndCloseBody(resp.body) + defer drainRespBody(resp) - if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { - return err - } - return nil + return checkRespCode(resp, []int{http.StatusNoContent}) } func generateTableACLPayload(policies []TableAccessPolicy) (io.Reader, int, error) { @@ -182,38 +286,99 @@ func generateTableACLPayload(policies []TableAccessPolicy) (io.Reader, int, erro return xmlMarshal(sil) } -// GetTablePermissions gets the table ACL permissions, as per REST details 
https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-table-acl -func (c *TableServiceClient) GetTablePermissions(table AzureTable, timeout int) (permissionResponse []TableAccessPolicy, err error) { - params := url.Values{"comp": {"acl"}} - - if timeout > 0 { - params.Add("timeout", strconv.Itoa(timeout)) +// GetPermissions gets the table ACL permissions +// See https://docs.microsoft.com/rest/api/storageservices/fileservices/get-table-acl +func (t *Table) GetPermissions(timeout int, options *TableOptions) ([]TableAccessPolicy, error) { + params := url.Values{"comp": {"acl"}, + "timeout": {strconv.Itoa(int(timeout))}, } - uri := c.client.getEndpoint(tableServiceName, string(table), params) - headers := c.client.getStandardHeaders() - resp, err := c.client.execInternalJSON(http.MethodGet, uri, headers, nil, c.auth) + uri := t.tsc.client.getEndpoint(tableServiceName, t.Name, params) + headers := t.tsc.client.getStandardHeaders() + headers = options.addToHeaders(headers) + + resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth) if err != nil { return nil, err } - defer resp.body.Close() + defer resp.Body.Close() - if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - ioutil.ReadAll(resp.body) + if err = checkRespCode(resp, []int{http.StatusOK}); err != nil { return nil, err } var ap AccessPolicy - err = xmlUnmarshal(resp.body, &ap.SignedIdentifiersList) + err = xmlUnmarshal(resp.Body, &ap.SignedIdentifiersList) if err != nil { return nil, err } - out := updateTableAccessPolicy(ap) - return out, nil + return updateTableAccessPolicy(ap), nil +} + +func (t *Table) queryEntities(uri string, headers map[string]string, ml MetadataLevel) (*EntityQueryResult, error) { + headers = mergeHeaders(headers, t.tsc.client.getStandardHeaders()) + if ml != EmptyPayload { + headers[headerAccept] = string(ml) + } + + resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if err = checkRespCode(resp, []int{http.StatusOK}); err != nil { + return nil, err + } + + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + var entities EntityQueryResult + err = json.Unmarshal(data, &entities) + if err != nil { + return nil, err + } + + for i := range entities.Entities { + entities.Entities[i].Table = t + } + entities.table = t + + contToken := extractContinuationTokenFromHeaders(resp.Header) + if contToken == nil { + entities.NextLink = nil + } else { + originalURI, err := url.Parse(uri) + if err != nil { + return nil, err + } + v := originalURI.Query() + v.Set(nextPartitionKeyQueryParameter, contToken.NextPartitionKey) + v.Set(nextRowKeyQueryParameter, contToken.NextRowKey) + newURI := t.tsc.client.getEndpoint(tableServiceName, t.buildPath(), v) + entities.NextLink = &newURI + entities.ml = ml + } + + return &entities, nil +} + +func extractContinuationTokenFromHeaders(h http.Header) *continuationToken { + ct := continuationToken{ + NextPartitionKey: h.Get(headerNextPartitionKey), + NextRowKey: h.Get(headerNextRowKey), + } + + if ct.NextPartitionKey != "" && ct.NextRowKey != "" { + return &ct + } + return nil } func updateTableAccessPolicy(ap AccessPolicy) []TableAccessPolicy { - out := []TableAccessPolicy{} + taps := []TableAccessPolicy{} for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers { tap := TableAccessPolicy{ ID: policy.ID, @@ -225,9 +390,9 @@ func updateTableAccessPolicy(ap AccessPolicy) []TableAccessPolicy { 
 		tap.CanUpdate = updatePermissions(policy.AccessPolicy.Permission, "u")
 		tap.CanDelete = updatePermissions(policy.AccessPolicy.Permission, "d")
 
-		out = append(out, tap)
+		taps = append(taps, tap)
 	}
-	return out
+	return taps
 }
 
 func generateTablePermissions(tap *TableAccessPolicy) (permissions string) {
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go
new file mode 100644
index 00000000..a2159e29
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go
@@ -0,0 +1,328 @@
+package storage
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"mime/multipart"
+	"net/http"
+	"net/textproto"
+	"sort"
+	"strings"
+
+	"github.com/marstr/guid"
+)
+
+// Operation type. Insert, Delete, Replace etc.
+type Operation int
+
+// consts for batch operations.
+const (
+	InsertOp          = Operation(1)
+	DeleteOp          = Operation(2)
+	ReplaceOp         = Operation(3)
+	MergeOp           = Operation(4)
+	InsertOrReplaceOp = Operation(5)
+	InsertOrMergeOp   = Operation(6)
+)
+
+// BatchEntity is used for tracking entities to operate on and
+// whether operations (replace/merge etc) should be forced.
+// Wrapper for a regular Entity with additional data specific to the entity.
+type BatchEntity struct {
+	*Entity
+	Force bool
+	Op    Operation
+}
+
+// TableBatch stores all the entities that will be operated on during a batch process.
+// Entities can be inserted, replaced or deleted.
+type TableBatch struct {
+	BatchEntitySlice []BatchEntity
+
+	// reference to table we're operating on.
+	Table *Table
+}
+
+// defaultChangesetHeaders for changeSets
+var defaultChangesetHeaders = map[string]string{
+	"Accept":       "application/json;odata=minimalmetadata",
+	"Content-Type": "application/json",
+	"Prefer":       "return-no-content",
+}
+
+// NewBatch returns a new TableBatch for populating.
+func (t *Table) NewBatch() *TableBatch {
+	return &TableBatch{
+		Table: t,
+	}
+}
+
+// InsertEntity adds an entity in preparation for a batch insert.
+func (t *TableBatch) InsertEntity(entity *Entity) {
+	be := BatchEntity{Entity: entity, Force: false, Op: InsertOp}
+	t.BatchEntitySlice = append(t.BatchEntitySlice, be)
+}
+
+// InsertOrReplaceEntity adds an entity in preparation for a batch insert or replace.
+func (t *TableBatch) InsertOrReplaceEntity(entity *Entity, force bool) {
+	be := BatchEntity{Entity: entity, Force: force, Op: InsertOrReplaceOp}
+	t.BatchEntitySlice = append(t.BatchEntitySlice, be)
+}
+
+// InsertOrReplaceEntityByForce adds an entity in preparation for a batch insert or replace. Forces regardless of ETag
+func (t *TableBatch) InsertOrReplaceEntityByForce(entity *Entity) {
+	t.InsertOrReplaceEntity(entity, true)
+}
+
+// InsertOrMergeEntity adds an entity in preparation for a batch insert or merge.
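+//
+// A batch usage sketch (assuming a *Table named t and an *Entity named e):
+//
+//	batch := t.NewBatch()
+//	batch.InsertOrMergeEntity(e, false)
+//	err := batch.ExecuteBatch()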
+func (t *TableBatch) InsertOrMergeEntity(entity *Entity, force bool) {
+	be := BatchEntity{Entity: entity, Force: force, Op: InsertOrMergeOp}
+	t.BatchEntitySlice = append(t.BatchEntitySlice, be)
+}
+
+// InsertOrMergeEntityByForce adds an entity in preparation for a batch insert or merge. Forces regardless of ETag
+func (t *TableBatch) InsertOrMergeEntityByForce(entity *Entity) {
+	t.InsertOrMergeEntity(entity, true)
+}
+
+// ReplaceEntity adds an entity in preparation for a batch replace.
+func (t *TableBatch) ReplaceEntity(entity *Entity) {
+	be := BatchEntity{Entity: entity, Force: false, Op: ReplaceOp}
+	t.BatchEntitySlice = append(t.BatchEntitySlice, be)
+}
+
+// DeleteEntity adds an entity in preparation for a batch delete.
+func (t *TableBatch) DeleteEntity(entity *Entity, force bool) {
+	be := BatchEntity{Entity: entity, Force: force, Op: DeleteOp}
+	t.BatchEntitySlice = append(t.BatchEntitySlice, be)
+}
+
+// DeleteEntityByForce adds an entity in preparation for a batch delete. Forces regardless of ETag
+func (t *TableBatch) DeleteEntityByForce(entity *Entity, force bool) {
+	t.DeleteEntity(entity, true)
+}
+
+// MergeEntity adds an entity in preparation for a batch merge.
+func (t *TableBatch) MergeEntity(entity *Entity) {
+	be := BatchEntity{Entity: entity, Force: false, Op: MergeOp}
+	t.BatchEntitySlice = append(t.BatchEntitySlice, be)
+}
+
+// ExecuteBatch executes many table operations in one request to Azure.
+// The operations can be combinations of Insert, Delete, Replace and Merge.
+// It creates the inner changeset body (the various operations: Insert, Delete, etc.)
+// and then the outer request packet that encompasses the changesets.
+// As per https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/performing-entity-group-transactions
+func (t *TableBatch) ExecuteBatch() error {
+
+	// Using `github.com/marstr/guid` is in response to issue #947 (https://github.com/Azure/azure-sdk-for-go/issues/947).
+	id, err := guid.NewGUIDs(guid.CreationStrategyVersion1)
+	if err != nil {
+		return err
+	}
+
+	changesetBoundary := fmt.Sprintf("changeset_%s", id.String())
+	uri := t.Table.tsc.client.getEndpoint(tableServiceName, "$batch", nil)
+	changesetBody, err := t.generateChangesetBody(changesetBoundary)
+	if err != nil {
+		return err
+	}
+
+	id, err = guid.NewGUIDs(guid.CreationStrategyVersion1)
+	if err != nil {
+		return err
+	}
+
+	boundary := fmt.Sprintf("batch_%s", id.String())
+	body, err := generateBody(changesetBody, changesetBoundary, boundary)
+	if err != nil {
+		return err
+	}
+
+	headers := t.Table.tsc.client.getStandardHeaders()
+	headers[headerContentType] = fmt.Sprintf("multipart/mixed; boundary=%s", boundary)
+
+	resp, err := t.Table.tsc.client.execBatchOperationJSON(http.MethodPost, uri, headers, bytes.NewReader(body.Bytes()), t.Table.tsc.auth)
+	if err != nil {
+		return err
+	}
+	defer drainRespBody(resp.resp)
+
+	if err = checkRespCode(resp.resp, []int{http.StatusAccepted}); err != nil {
+
+		// check which batch operation failed.
+		operationFailedMessage := t.getFailedOperation(resp.odata.Err.Message.Value)
+		requestID, date, version := getDebugHeaders(resp.resp.Header)
+		return AzureStorageServiceError{
+			StatusCode: resp.resp.StatusCode,
+			Code:       resp.odata.Err.Code,
+			RequestID:  requestID,
+			Date:       date,
+			APIVersion: version,
+			Message:    operationFailedMessage,
+		}
+	}
+
+	return nil
+}
+
+// getFailedOperation parses the original Azure error string and determines which operation failed
+// and generates an appropriate message.
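+// For example, an errorMessage of the form "0:<description>" indicates that
+// element 0 of the batch failed; the returned message calls that element out.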
+func (t *TableBatch) getFailedOperation(errorMessage string) string {
+	// errorMessage consists of "number:string"; we just need the number.
+	sp := strings.Split(errorMessage, ":")
+	if len(sp) > 1 {
+		msg := fmt.Sprintf("Element %s in the batch returned an unexpected response code.\n%s", sp[0], errorMessage)
+		return msg
+	}
+
+	// can't parse the message; just return the original message to the client.
+	return errorMessage
+}
+
+// generateBody generates the complete body for the batch request.
+func generateBody(changeSetBody *bytes.Buffer, changesetBoundary string, boundary string) (*bytes.Buffer, error) {
+
+	body := new(bytes.Buffer)
+	writer := multipart.NewWriter(body)
+	writer.SetBoundary(boundary)
+	h := make(textproto.MIMEHeader)
+	h.Set(headerContentType, fmt.Sprintf("multipart/mixed; boundary=%s\r\n", changesetBoundary))
+	batchWriter, err := writer.CreatePart(h)
+	if err != nil {
+		return nil, err
+	}
+	batchWriter.Write(changeSetBody.Bytes())
+	writer.Close()
+	return body, nil
+}
+
+// generateChangesetBody generates the individual changesets for the various operations within the batch request.
+// There is a changeset for Insert, Delete, Merge etc.
+func (t *TableBatch) generateChangesetBody(changesetBoundary string) (*bytes.Buffer, error) {
+
+	body := new(bytes.Buffer)
+	writer := multipart.NewWriter(body)
+	writer.SetBoundary(changesetBoundary)
+
+	for _, be := range t.BatchEntitySlice {
+		if err := t.generateEntitySubset(&be, writer); err != nil {
+			return nil, err
+		}
+	}
+
+	writer.Close()
+	return body, nil
+}
+
+// generateVerb generates the HTTP request VERB required for each changeset.
+func generateVerb(op Operation) (string, error) {
+	switch op {
+	case InsertOp:
+		return http.MethodPost, nil
+	case DeleteOp:
+		return http.MethodDelete, nil
+	case ReplaceOp, InsertOrReplaceOp:
+		return http.MethodPut, nil
+	case MergeOp, InsertOrMergeOp:
+		return "MERGE", nil
+	default:
+		return "", errors.New("Unable to detect operation")
+	}
+}
+
+// generateQueryPath generates the query path used within the changesets.
+// For inserts it will just be a table query path (table name), but for
+// other operations (modifying an existing entity) the partition/row keys
+// need to be generated.
+func (t *TableBatch) generateQueryPath(op Operation, entity *Entity) string {
+	if op == InsertOp {
+		return entity.Table.buildPath()
+	}
+	return entity.buildPath()
+}
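+
+// For reference, each changeset part produced by generateEntitySubset below
+// serializes one operation in roughly this shape (the URI and JSON body are
+// illustrative, not taken from a real request):
+//
+//	Content-Type: application/http
+//	Content-Transfer-Encoding: binary
+//
+//	MERGE https://myaccount.table.core.windows.net/mytable(PartitionKey='p1',RowKey='r1') HTTP/1.1
+//	Accept: application/json;odata=minimalmetadata
+//	Content-Type: application/json
+//	If-Match: *
+//	Prefer: return-no-content
+//
+//	{"Name":"Bob"}
+
+// generateGenericOperationHeaders generates common headers for a given operation.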
+func generateGenericOperationHeaders(be *BatchEntity) map[string]string {
+	retval := map[string]string{}
+
+	for k, v := range defaultChangesetHeaders {
+		retval[k] = v
+	}
+
+	if be.Op == DeleteOp || be.Op == ReplaceOp || be.Op == MergeOp {
+		if be.Force || be.Entity.OdataEtag == "" {
+			retval["If-Match"] = "*"
+		} else {
+			retval["If-Match"] = be.Entity.OdataEtag
+		}
+	}
+
+	return retval
+}
+
+// generateEntitySubset generates the body payload for a particular batch entity.
+func (t *TableBatch) generateEntitySubset(batchEntity *BatchEntity, writer *multipart.Writer) error {
+
+	h := make(textproto.MIMEHeader)
+	h.Set(headerContentType, "application/http")
+	h.Set(headerContentTransferEncoding, "binary")
+
+	verb, err := generateVerb(batchEntity.Op)
+	if err != nil {
+		return err
+	}
+
+	genericOpHeadersMap := generateGenericOperationHeaders(batchEntity)
+	queryPath := t.generateQueryPath(batchEntity.Op, batchEntity.Entity)
+	uri := t.Table.tsc.client.getEndpoint(tableServiceName, queryPath, nil)
+
+	operationWriter, err := writer.CreatePart(h)
+	if err != nil {
+		return err
+	}
+
+	urlAndVerb := fmt.Sprintf("%s %s HTTP/1.1\r\n", verb, uri)
+	operationWriter.Write([]byte(urlAndVerb))
+	writeHeaders(genericOpHeadersMap, operationWriter)
+	operationWriter.Write([]byte("\r\n")) // an additional \r\n is needed per changeset, separating the "headers" and the body.
+
+	// the delete operation doesn't need a body.
+	if batchEntity.Op != DeleteOp {
+		body, err := json.Marshal(batchEntity.Entity)
+		if err != nil {
+			return err
+		}
+		operationWriter.Write(body)
+	}
+
+	return nil
+}
+
+func writeHeaders(h map[string]string, writer io.Writer) {
+	// This way it is guaranteed the headers will be written in a sorted order
+	var keys []string
+	for k := range h {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+	for _, k := range keys {
+		writer.Write([]byte(fmt.Sprintf("%s: %s\r\n", k, h[k])))
+	}
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/table_entities.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/table_entities.go
deleted file mode 100644
index 1758d9f3..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/storage/table_entities.go
+++ /dev/null
@@ -1,345 +0,0 @@
-package storage
-
-import (
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"io"
-	"net/http"
-	"net/url"
-	"reflect"
-)
-
-// Annotating as secure for gas scanning
-/* #nosec */
-const (
-	partitionKeyNode                    = "PartitionKey"
-	rowKeyNode                          = "RowKey"
-	tag                                 = "table"
-	tagIgnore                           = "-"
-	continuationTokenPartitionKeyHeader = "X-Ms-Continuation-Nextpartitionkey"
-	continuationTokenRowHeader          = "X-Ms-Continuation-Nextrowkey"
-	maxTopParameter                     = 1000
-)
-
-type queryTablesResponse struct {
-	TableName []struct {
-		TableName string `json:"TableName"`
-	} `json:"value"`
-}
-
-const (
-	tableOperationTypeInsert          = iota
-	tableOperationTypeUpdate          = iota
-	tableOperationTypeMerge           = iota
-	tableOperationTypeInsertOrReplace = iota
-	tableOperationTypeInsertOrMerge   = iota
-)
-
-type tableOperation int
-
-// TableEntity interface specifies
-// the functions needed to support
-// marshaling and unmarshaling into
-// Azure Tables. The struct must only contain
-// simple types because Azure Tables do not
-// support hierarchy.
-type TableEntity interface {
-	PartitionKey() string
-	RowKey() string
-	SetPartitionKey(string) error
-	SetRowKey(string) error
-}
-
-// ContinuationToken is an opaque (ie not useful to inspect)
-// struct that Get...
methods can return if there are more -// entries to be returned than the ones already -// returned. Just pass it to the same function to continue -// receiving the remaining entries. -type ContinuationToken struct { - NextPartitionKey string - NextRowKey string -} - -type getTableEntriesResponse struct { - Elements []map[string]interface{} `json:"value"` -} - -// QueryTableEntities queries the specified table and returns the unmarshaled -// entities of type retType. -// top parameter limits the returned entries up to top. Maximum top -// allowed by Azure API is 1000. In case there are more than top entries to be -// returned the function will return a non nil *ContinuationToken. You can call the -// same function again passing the received ContinuationToken as previousContToken -// parameter in order to get the following entries. The query parameter -// is the odata query. To retrieve all the entries pass the empty string. -// The function returns a pointer to a TableEntity slice, the *ContinuationToken -// if there are more entries to be returned and an error in case something went -// wrong. -// -// Example: -// entities, cToken, err = tSvc.QueryTableEntities("table", cToken, reflect.TypeOf(entity), 20, "") -func (c *TableServiceClient) QueryTableEntities(tableName AzureTable, previousContToken *ContinuationToken, retType reflect.Type, top int, query string) ([]TableEntity, *ContinuationToken, error) { - if top > maxTopParameter { - return nil, nil, fmt.Errorf("top accepts at maximum %d elements. Requested %d instead", maxTopParameter, top) - } - - uri := c.client.getEndpoint(tableServiceName, pathForTable(tableName), url.Values{}) - uri += fmt.Sprintf("?$top=%d", top) - if query != "" { - uri += fmt.Sprintf("&$filter=%s", url.QueryEscape(query)) - } - - if previousContToken != nil { - uri += fmt.Sprintf("&NextPartitionKey=%s&NextRowKey=%s", previousContToken.NextPartitionKey, previousContToken.NextRowKey) - } - - headers := c.getStandardHeaders() - - headers["Content-Length"] = "0" - - resp, err := c.client.execInternalJSON(http.MethodGet, uri, headers, nil, c.auth) - - if err != nil { - return nil, nil, err - } - - contToken := extractContinuationTokenFromHeaders(resp.headers) - - defer resp.body.Close() - - if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return nil, contToken, err - } - - retEntries, err := deserializeEntity(retType, resp.body) - if err != nil { - return nil, contToken, err - } - - return retEntries, contToken, nil -} - -// InsertEntity inserts an entity in the specified table. -// The function fails if there is an entity with the same -// PartitionKey and RowKey in the table. 
-func (c *TableServiceClient) InsertEntity(table AzureTable, entity TableEntity) error { - if sc, err := c.execTable(table, entity, false, http.MethodPost); err != nil { - return checkRespCode(sc, []int{http.StatusCreated}) - } - - return nil -} - -func (c *TableServiceClient) execTable(table AzureTable, entity TableEntity, specifyKeysInURL bool, method string) (int, error) { - uri := c.client.getEndpoint(tableServiceName, pathForTable(table), url.Values{}) - if specifyKeysInURL { - uri += fmt.Sprintf("(PartitionKey='%s',RowKey='%s')", url.QueryEscape(entity.PartitionKey()), url.QueryEscape(entity.RowKey())) - } - - headers := c.getStandardHeaders() - - var buf bytes.Buffer - - if err := injectPartitionAndRowKeys(entity, &buf); err != nil { - return 0, err - } - - headers["Content-Length"] = fmt.Sprintf("%d", buf.Len()) - - resp, err := c.client.execInternalJSON(method, uri, headers, &buf, c.auth) - - if err != nil { - return 0, err - } - - defer resp.body.Close() - - return resp.statusCode, nil -} - -// UpdateEntity updates the contents of an entity with the -// one passed as parameter. The function fails if there is no entity -// with the same PartitionKey and RowKey in the table. -func (c *TableServiceClient) UpdateEntity(table AzureTable, entity TableEntity) error { - if sc, err := c.execTable(table, entity, true, http.MethodPut); err != nil { - return checkRespCode(sc, []int{http.StatusNoContent}) - } - return nil -} - -// MergeEntity merges the contents of an entity with the -// one passed as parameter. -// The function fails if there is no entity -// with the same PartitionKey and RowKey in the table. -func (c *TableServiceClient) MergeEntity(table AzureTable, entity TableEntity) error { - if sc, err := c.execTable(table, entity, true, "MERGE"); err != nil { - return checkRespCode(sc, []int{http.StatusNoContent}) - } - return nil -} - -// DeleteEntityWithoutCheck deletes the entity matching by -// PartitionKey and RowKey. There is no check on IfMatch -// parameter so the entity is always deleted. -// The function fails if there is no entity -// with the same PartitionKey and RowKey in the table. -func (c *TableServiceClient) DeleteEntityWithoutCheck(table AzureTable, entity TableEntity) error { - return c.DeleteEntity(table, entity, "*") -} - -// DeleteEntity deletes the entity matching by -// PartitionKey, RowKey and ifMatch field. -// The function fails if there is no entity -// with the same PartitionKey and RowKey in the table or -// the ifMatch is different. -func (c *TableServiceClient) DeleteEntity(table AzureTable, entity TableEntity, ifMatch string) error { - uri := c.client.getEndpoint(tableServiceName, pathForTable(table), url.Values{}) - uri += fmt.Sprintf("(PartitionKey='%s',RowKey='%s')", url.QueryEscape(entity.PartitionKey()), url.QueryEscape(entity.RowKey())) - - headers := c.getStandardHeaders() - - headers["Content-Length"] = "0" - headers["If-Match"] = ifMatch - - resp, err := c.client.execInternalJSON(http.MethodDelete, uri, headers, nil, c.auth) - - if err != nil { - return err - } - defer resp.body.Close() - - if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { - return err - } - - return nil -} - -// InsertOrReplaceEntity inserts an entity in the specified table -// or replaced the existing one. 
-func (c *TableServiceClient) InsertOrReplaceEntity(table AzureTable, entity TableEntity) error { - if sc, err := c.execTable(table, entity, true, http.MethodPut); err != nil { - return checkRespCode(sc, []int{http.StatusNoContent}) - } - return nil -} - -// InsertOrMergeEntity inserts an entity in the specified table -// or merges the existing one. -func (c *TableServiceClient) InsertOrMergeEntity(table AzureTable, entity TableEntity) error { - if sc, err := c.execTable(table, entity, true, "MERGE"); err != nil { - return checkRespCode(sc, []int{http.StatusNoContent}) - } - return nil -} - -func injectPartitionAndRowKeys(entity TableEntity, buf *bytes.Buffer) error { - if err := json.NewEncoder(buf).Encode(entity); err != nil { - return err - } - - dec := make(map[string]interface{}) - if err := json.NewDecoder(buf).Decode(&dec); err != nil { - return err - } - - // Inject PartitionKey and RowKey - dec[partitionKeyNode] = entity.PartitionKey() - dec[rowKeyNode] = entity.RowKey() - - // Remove tagged fields - // The tag is defined in the const section - // This is useful to avoid storing the PartitionKey and RowKey twice. - numFields := reflect.ValueOf(entity).Elem().NumField() - for i := 0; i < numFields; i++ { - f := reflect.ValueOf(entity).Elem().Type().Field(i) - - if f.Tag.Get(tag) == tagIgnore { - // we must look for its JSON name in the dictionary - // as the user can rename it using a tag - jsonName := f.Name - if f.Tag.Get("json") != "" { - jsonName = f.Tag.Get("json") - } - delete(dec, jsonName) - } - } - - buf.Reset() - - if err := json.NewEncoder(buf).Encode(&dec); err != nil { - return err - } - - return nil -} - -func deserializeEntity(retType reflect.Type, reader io.Reader) ([]TableEntity, error) { - buf := new(bytes.Buffer) - - var ret getTableEntriesResponse - if err := json.NewDecoder(reader).Decode(&ret); err != nil { - return nil, err - } - - tEntries := make([]TableEntity, len(ret.Elements)) - - for i, entry := range ret.Elements { - - buf.Reset() - if err := json.NewEncoder(buf).Encode(entry); err != nil { - return nil, err - } - - dec := make(map[string]interface{}) - if err := json.NewDecoder(buf).Decode(&dec); err != nil { - return nil, err - } - - var pKey, rKey string - // strip pk and rk - for key, val := range dec { - switch key { - case partitionKeyNode: - pKey = val.(string) - case rowKeyNode: - rKey = val.(string) - } - } - - delete(dec, partitionKeyNode) - delete(dec, rowKeyNode) - - buf.Reset() - if err := json.NewEncoder(buf).Encode(dec); err != nil { - return nil, err - } - - // Create a empty retType instance - tEntries[i] = reflect.New(retType.Elem()).Interface().(TableEntity) - // Popolate it with the values - if err := json.NewDecoder(buf).Decode(&tEntries[i]); err != nil { - return nil, err - } - - // Reset PartitionKey and RowKey - if err := tEntries[i].SetPartitionKey(pKey); err != nil { - return nil, err - } - if err := tEntries[i].SetRowKey(rKey); err != nil { - return nil, err - } - } - - return tEntries, nil -} - -func extractContinuationTokenFromHeaders(h http.Header) *ContinuationToken { - ct := ContinuationToken{h.Get(continuationTokenPartitionKeyHeader), h.Get(continuationTokenRowHeader)} - - if ct.NextPartitionKey != "" && ct.NextRowKey != "" { - return &ct - } - return nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go index ee5e0a86..1f063a39 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go 
+++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go @@ -1,5 +1,35 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strconv" +) + +const ( + headerAccept = "Accept" + headerEtag = "Etag" + headerPrefer = "Prefer" + headerXmsContinuation = "x-ms-Continuation-NextTableName" +) + // TableServiceClient contains operations for Microsoft Azure Table Storage // Service. type TableServiceClient struct { @@ -7,14 +37,168 @@ type TableServiceClient struct { auth authentication } +// TableOptions includes options for some table operations +type TableOptions struct { + RequestID string +} + +func (options *TableOptions) addToHeaders(h map[string]string) map[string]string { + if options != nil { + h = addToHeaders(h, "x-ms-client-request-id", options.RequestID) + } + return h +} + +// QueryNextLink includes information for getting the next page of +// results in query operations +type QueryNextLink struct { + NextLink *string + ml MetadataLevel +} + // GetServiceProperties gets the properties of your storage account's table service. // See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-table-service-properties -func (c *TableServiceClient) GetServiceProperties() (*ServiceProperties, error) { - return c.client.getServiceProperties(tableServiceName, c.auth) +func (t *TableServiceClient) GetServiceProperties() (*ServiceProperties, error) { + return t.client.getServiceProperties(tableServiceName, t.auth) } // SetServiceProperties sets the properties of your storage account's table service. // See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-table-service-properties -func (c *TableServiceClient) SetServiceProperties(props ServiceProperties) error { - return c.client.setServiceProperties(props, tableServiceName, c.auth) +func (t *TableServiceClient) SetServiceProperties(props ServiceProperties) error { + return t.client.setServiceProperties(props, tableServiceName, t.auth) +} + +// GetTableReference returns a Table object for the specified table name. +func (t *TableServiceClient) GetTableReference(name string) *Table { + return &Table{ + tsc: t, + Name: name, + } +} + +// QueryTablesOptions includes options for some table operations +type QueryTablesOptions struct { + Top uint + Filter string + RequestID string +} + +func (options *QueryTablesOptions) getParameters() (url.Values, map[string]string) { + query := url.Values{} + headers := map[string]string{} + if options != nil { + if options.Top > 0 { + query.Add(OdataTop, strconv.FormatUint(uint64(options.Top), 10)) + } + if options.Filter != "" { + query.Add(OdataFilter, options.Filter) + } + headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID) + } + return query, headers +} + +// QueryTables returns the tables in the storage account. 
+// You can use query options defined by the OData Protocol specification. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-tables +func (t *TableServiceClient) QueryTables(ml MetadataLevel, options *QueryTablesOptions) (*TableQueryResult, error) { + query, headers := options.getParameters() + uri := t.client.getEndpoint(tableServiceName, tablesURIPath, query) + return t.queryTables(uri, headers, ml) +} + +// NextResults returns the next page of results +// from a QueryTables or a NextResults operation. +// +// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-tables +// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-timeout-and-pagination +func (tqr *TableQueryResult) NextResults(options *TableOptions) (*TableQueryResult, error) { + if tqr == nil { + return nil, errNilPreviousResult + } + if tqr.NextLink == nil { + return nil, errNilNextLink + } + headers := options.addToHeaders(map[string]string{}) + + return tqr.tsc.queryTables(*tqr.NextLink, headers, tqr.ml) +} + +// TableQueryResult contains the response from +// QueryTables and QueryTablesNextResults functions. +type TableQueryResult struct { + OdataMetadata string `json:"odata.metadata"` + Tables []Table `json:"value"` + QueryNextLink + tsc *TableServiceClient +} + +func (t *TableServiceClient) queryTables(uri string, headers map[string]string, ml MetadataLevel) (*TableQueryResult, error) { + if ml == EmptyPayload { + return nil, errEmptyPayload + } + headers = mergeHeaders(headers, t.client.getStandardHeaders()) + headers[headerAccept] = string(ml) + + resp, err := t.client.exec(http.MethodGet, uri, headers, nil, t.auth) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if err := checkRespCode(resp, []int{http.StatusOK}); err != nil { + return nil, err + } + + respBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + var out TableQueryResult + err = json.Unmarshal(respBody, &out) + if err != nil { + return nil, err + } + + for i := range out.Tables { + out.Tables[i].tsc = t + } + out.tsc = t + + nextLink := resp.Header.Get(http.CanonicalHeaderKey(headerXmsContinuation)) + if nextLink == "" { + out.NextLink = nil + } else { + originalURI, err := url.Parse(uri) + if err != nil { + return nil, err + } + v := originalURI.Query() + v.Set(nextTableQueryParameter, nextLink) + newURI := t.client.getEndpoint(tableServiceName, tablesURIPath, v) + out.NextLink = &newURI + out.ml = ml + } + + return &out, nil +} + +func addBodyRelatedHeaders(h map[string]string, length int) map[string]string { + h[headerContentType] = "application/json" + h[headerContentLength] = fmt.Sprintf("%v", length) + h[headerAcceptCharset] = "UTF-8" + return h +} + +func addReturnContentHeaders(h map[string]string, ml MetadataLevel) map[string]string { + if ml != EmptyPayload { + h[headerPrefer] = "return-content" + h[headerAccept] = string(ml) + } else { + h[headerPrefer] = "return-no-content" + // From API version 2015-12-11 onwards, Accept header is required + h[headerAccept] = string(NoMetadata) + } + return h } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go index 57ca1b6d..e8a5dcf8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the 
"License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "bytes" "crypto/hmac" @@ -12,9 +26,37 @@ import ( "net/http" "net/url" "reflect" + "strconv" + "strings" "time" ) +var ( + fixedTime = time.Date(2050, time.December, 20, 21, 55, 0, 0, time.FixedZone("GMT", -6)) + accountSASOptions = AccountSASTokenOptions{ + Services: Services{ + Blob: true, + }, + ResourceTypes: ResourceTypes{ + Service: true, + Container: true, + Object: true, + }, + Permissions: Permissions{ + Read: true, + Write: true, + Delete: true, + List: true, + Add: true, + Create: true, + Update: true, + Process: true, + }, + Expiry: fixedTime, + UseHTTPS: true, + } +) + func (c Client) computeHmac256(message string) string { h := hmac.New(sha256.New, c.accountKey) h.Write([]byte(message)) @@ -29,6 +71,10 @@ func timeRfc1123Formatted(t time.Time) string { return t.Format(http.TimeFormat) } +func timeRFC3339Formatted(t time.Time) string { + return t.Format("2006-01-02T15:04:05.0000000Z") +} + func mergeParams(v1, v2 url.Values) url.Values { out := url.Values{} for k, v := range v1 { @@ -76,10 +122,123 @@ func headersFromStruct(v interface{}) map[string]string { value := reflect.ValueOf(v) for i := 0; i < value.NumField(); i++ { key := value.Type().Field(i).Tag.Get("header") - val := value.Field(i).String() - if key != "" && val != "" { - headers[key] = val + if key != "" { + reflectedValue := reflect.Indirect(value.Field(i)) + var val string + if reflectedValue.IsValid() { + switch reflectedValue.Type() { + case reflect.TypeOf(fixedTime): + val = timeRfc1123Formatted(reflectedValue.Interface().(time.Time)) + case reflect.TypeOf(uint64(0)), reflect.TypeOf(uint(0)): + val = strconv.FormatUint(reflectedValue.Uint(), 10) + case reflect.TypeOf(int(0)): + val = strconv.FormatInt(reflectedValue.Int(), 10) + default: + val = reflectedValue.String() + } + } + if val != "" { + headers[key] = val + } } } return headers } + +// merges extraHeaders into headers and returns headers +func mergeHeaders(headers, extraHeaders map[string]string) map[string]string { + for k, v := range extraHeaders { + headers[k] = v + } + return headers +} + +func addToHeaders(h map[string]string, key, value string) map[string]string { + if value != "" { + h[key] = value + } + return h +} + +func addTimeToHeaders(h map[string]string, key string, value *time.Time) map[string]string { + if value != nil { + h = addToHeaders(h, key, timeRfc1123Formatted(*value)) + } + return h +} + +func addTimeout(params url.Values, timeout uint) url.Values { + if timeout > 0 { + params.Add("timeout", fmt.Sprintf("%v", timeout)) + } + return params +} + +func addSnapshot(params url.Values, snapshot *time.Time) url.Values { + if snapshot != nil { + params.Add("snapshot", timeRFC3339Formatted(*snapshot)) + } + return params +} + +func getTimeFromHeaders(h http.Header, key string) (*time.Time, error) { + var out time.Time + var err error + outStr := h.Get(key) + if outStr != "" { + out, err = time.Parse(time.RFC1123, outStr) + if err != nil { + return nil, err + } + } + return &out, nil +} + +// TimeRFC1123 is an alias for 
time.Time needed for custom Unmarshalling +type TimeRFC1123 time.Time + +// UnmarshalXML is a custom unmarshaller that overrides the default time unmarshal which uses a different time layout. +func (t *TimeRFC1123) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + var value string + d.DecodeElement(&value, &start) + parse, err := time.Parse(time.RFC1123, value) + if err != nil { + return err + } + *t = TimeRFC1123(parse) + return nil +} + +// MarshalXML marshals using time.RFC1123. +func (t *TimeRFC1123) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + return e.EncodeElement(time.Time(*t).Format(time.RFC1123), start) +} + +// returns a map of custom metadata values from the specified HTTP header +func getMetadataFromHeaders(header http.Header) map[string]string { + metadata := make(map[string]string) + for k, v := range header { + // Can't trust CanonicalHeaderKey() to munge case + // reliably. "_" is allowed in identifiers: + // https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx + // https://msdn.microsoft.com/library/aa664670(VS.71).aspx + // http://tools.ietf.org/html/rfc7230#section-3.2 + // ...but "_" is considered invalid by + // CanonicalMIMEHeaderKey in + // https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542 + // so k can be "X-Ms-Meta-Lol" or "x-ms-meta-lol_rofl". + k = strings.ToLower(k) + if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) { + continue + } + // metadata["lol"] = content of the last X-Ms-Meta-Lol header + k = k[len(userDefinedMetadataHeaderPrefix):] + metadata[k] = v[len(v)-1] + } + + if len(metadata) == 0 { + return nil + } + + return metadata +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/version.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/version.go deleted file mode 100644 index c25fe337..00000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/version.go +++ /dev/null @@ -1,5 +0,0 @@ -package storage - -var ( - sdkVersion = "0.1.0" -) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/version/version.go b/vendor/github.com/Azure/azure-sdk-for-go/version/version.go new file mode 100644 index 00000000..0bf03cee --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/version/version.go @@ -0,0 +1,21 @@ +package version + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// Number contains the semantic version of this SDK. 
+const Number = "v16.2.1" diff --git a/vendor/github.com/Azure/go-autorest/README.md b/vendor/github.com/Azure/go-autorest/README.md index f4c34d0e..9a7b13a3 100644 --- a/vendor/github.com/Azure/go-autorest/README.md +++ b/vendor/github.com/Azure/go-autorest/README.md @@ -1,11 +1,22 @@ # go-autorest -[![GoDoc](https://godoc.org/github.com/Azure/go-autorest/autorest?status.png)](https://godoc.org/github.com/Azure/go-autorest/autorest) [![Build Status](https://travis-ci.org/Azure/go-autorest.svg?branch=master)](https://travis-ci.org/Azure/go-autorest) [![Go Report Card](https://goreportcard.com/badge/Azure/go-autorest)](https://goreportcard.com/report/Azure/go-autorest) +[![GoDoc](https://godoc.org/github.com/Azure/go-autorest/autorest?status.png)](https://godoc.org/github.com/Azure/go-autorest/autorest) +[![Build Status](https://travis-ci.org/Azure/go-autorest.svg?branch=master)](https://travis-ci.org/Azure/go-autorest) +[![Go Report Card](https://goreportcard.com/badge/Azure/go-autorest)](https://goreportcard.com/report/Azure/go-autorest) -## Usage -Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines -and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/) -generated Go code. +Package go-autorest provides an HTTP request client for use with [Autorest](https://github.com/Azure/autorest.go)-generated API client packages. + +An authentication client tested with Azure Active Directory (AAD) is also +provided in this repo in the package +`github.com/Azure/go-autorest/autorest/adal`. Despite its name, this package +is maintained only as part of the Azure Go SDK and is not related to other +"ADAL" libraries in [github.com/AzureAD](https://github.com/AzureAD). + +## Overview + +Package go-autorest implements an HTTP request pipeline suitable for use across +multiple goroutines and provides the shared routines used by packages generated +by [Autorest](https://github.com/Azure/autorest.go). The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, and Responding. A typical pattern is: @@ -129,4 +140,10 @@ go get github.com/Azure/go-autorest/autorest/to See LICENSE file. ----- -This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. + +This project has adopted the [Microsoft Open Source Code of +Conduct](https://opensource.microsoft.com/codeofconduct/). For more information +see the [Code of Conduct +FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact +[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional +questions or comments. diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md new file mode 100644 index 00000000..7b0c4bc4 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md @@ -0,0 +1,292 @@ +# Azure Active Directory authentication for Go + +This is a standalone package for authenticating with Azure Active +Directory from other Go libraries and applications, in particular the [Azure SDK +for Go](https://github.com/Azure/azure-sdk-for-go). 
+
+Note: Despite the package's name, it is not related to other "ADAL" libraries
+maintained in the [github.com/AzureAD](https://github.com/AzureAD) org. Issues
+should be opened in [this repo's](https://github.com/Azure/go-autorest/issues)
+or [the SDK's](https://github.com/Azure/azure-sdk-for-go/issues) issue
+trackers.
+
+## Install
+
+```bash
+go get -u github.com/Azure/go-autorest/autorest/adal
+```
+
+## Usage
+
+An Active Directory application is required in order to use this library. An application can be registered in the [Azure Portal](https://portal.azure.com/) by following these [guidelines](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-integrating-applications) or using the [Azure CLI](https://github.com/Azure/azure-cli).
+
+### Register an Azure AD Application with secret
+
+
+1. Register a new application with a `secret` credential
+
+   ```
+   az ad app create \
+    --display-name example-app \
+    --homepage https://example-app/home \
+    --identifier-uris https://example-app/app \
+    --password secret
+   ```
+
+2. Create a service principal using the `Application ID` from the previous step
+
+   ```
+   az ad sp create --id "Application ID"
+   ```
+
+   * Replace `Application ID` with `appId` from step 1.
+
+### Register an Azure AD Application with certificate
+
+1. Create a private key
+
+   ```
+   openssl genrsa -out "example-app.key" 2048
+   ```
+
+2. Create the certificate
+
+   ```
+   openssl req -new -key "example-app.key" -subj "/CN=example-app" -out "example-app.csr"
+   openssl x509 -req -in "example-app.csr" -signkey "example-app.key" -out "example-app.crt" -days 10000
+   ```
+
+3. Create the PKCS12 version of the certificate, also containing the private key
+
+   ```
+   openssl pkcs12 -export -out "example-app.pfx" -inkey "example-app.key" -in "example-app.crt" -passout pass:
+   ```
+
+4. Register a new application with the certificate content from `example-app.crt`
+
+   ```
+   certificateContents="$(tail -n+2 "example-app.crt" | head -n-1)"
+
+   az ad app create \
+    --display-name example-app \
+    --homepage https://example-app/home \
+    --identifier-uris https://example-app/app \
+    --key-usage Verify --end-date 2018-01-01 \
+    --key-value "${certificateContents}"
+   ```
+
+5. Create a service principal using the `Application ID` from the previous step
+
+   ```
+   az ad sp create --id "APPLICATION_ID"
+   ```
+
+   * Replace `APPLICATION_ID` with `appId` from step 4.
+
+
+### Grant the necessary permissions
+
+Azure relies on a Role-Based Access Control (RBAC) model to manage the access to resources at a fine-grained
+level. There is a set of [pre-defined roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-built-in-roles)
+which can be assigned to a service principal of an Azure AD application depending on your needs.
+
+```
+az role assignment create --assignee "SERVICE_PRINCIPAL_ID" --role "ROLE_NAME"
+```
+
+* Replace the `SERVICE_PRINCIPAL_ID` with the `appId` from the previous step.
+* Replace the `ROLE_NAME` with a role name of your choice.
+
+It is also possible to define custom role definitions.
+
+```
+az role definition create --role-definition role-definition.json
+```
+
+* Check [custom roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-control-custom-roles) for more details regarding the content of the `role-definition.json` file.
+
+
+### Acquire Access Token
+
+The common configuration used by all flows:
+
+```Go
+const activeDirectoryEndpoint = "https://login.microsoftonline.com/"
+tenantID := "TENANT_ID"
+oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, tenantID)
+
+applicationID := "APPLICATION_ID"
+
+callback := func(token adal.Token) error {
+	// This is called after the token is acquired
+	return nil
+}
+
+// The resource for which the token is acquired
+resource := "https://management.core.windows.net/"
+```
+
+* Replace the `TENANT_ID` with your tenant ID.
+* Replace the `APPLICATION_ID` with the value from the previous section.
+
+#### Client Credentials
+
+```Go
+applicationSecret := "APPLICATION_SECRET"
+
+spt, err := adal.NewServicePrincipalToken(
+	oauthConfig,
+	applicationID,
+	applicationSecret,
+	resource,
+	callbacks...)
+if err != nil {
+	return nil, err
+}
+
+// Acquire a new access token
+err = spt.Refresh()
+if err == nil {
+	token := spt.Token
+}
+```
+
+* Replace the `APPLICATION_SECRET` with the `password` value from the previous section.
+
+#### Client Certificate
+
+```Go
+certificatePath := "./example-app.pfx"
+
+certData, err := ioutil.ReadFile(certificatePath)
+if err != nil {
+	return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err)
+}
+
+// Get the certificate and private key from pfx file
+certificate, rsaPrivateKey, err := decodePkcs12(certData, "")
+if err != nil {
+	return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err)
+}
+
+spt, err := adal.NewServicePrincipalTokenFromCertificate(
+	oauthConfig,
+	applicationID,
+	certificate,
+	rsaPrivateKey,
+	resource,
+	callbacks...)
+
+// Acquire a new access token
+err = spt.Refresh()
+if err == nil {
+	token := spt.Token
+}
+```
+
+* Update the certificate path to point to the example-app.pfx file which was created in the previous section.
+
+
+#### Device Code
+
+```Go
+oauthClient := &http.Client{}
+
+// Acquire the device code
+deviceCode, err := adal.InitiateDeviceAuth(
+	oauthClient,
+	oauthConfig,
+	applicationID,
+	resource)
+if err != nil {
+	return nil, fmt.Errorf("Failed to start device auth flow: %s", err)
+}
+
+// Display the authentication message
+fmt.Println(*deviceCode.Message)
+
+// Wait here until the user is authenticated
+token, err := adal.WaitForUserCompletion(oauthClient, deviceCode)
+if err != nil {
+	return nil, fmt.Errorf("Failed to finish device auth flow: %s", err)
+}
+
+spt, err := adal.NewServicePrincipalTokenFromManualToken(
+	oauthConfig,
+	applicationID,
+	resource,
+	*token,
+	callbacks...)
+
+if err == nil {
+	token := spt.Token
+}
+```
+
+#### Username and Password
+
+```Go
+spt, err := adal.NewServicePrincipalTokenFromUsernamePassword(
+	oauthConfig,
+	applicationID,
+	username,
+	password,
+	resource,
+	callbacks...)
+
+if err == nil {
+	token := spt.Token
+}
+```
+
+#### Authorization Code
+
+```Go
+spt, err := adal.NewServicePrincipalTokenFromAuthorizationCode(
+	oauthConfig,
+	applicationID,
+	clientSecret,
+	authorizationCode,
+	redirectURI,
+	resource,
+	callbacks...)
+
+err = spt.Refresh()
+if err == nil {
+	token := spt.Token
+}
+```
+
+### Command Line Tool
+
+A command line tool is available in `cmd/adal.go` that can acquire a token for a given resource. It supports all flows mentioned above.
+ +``` +adal -h + +Usage of ./adal: + -applicationId string + application id + -certificatePath string + path to pk12/PFC application certificate + -mode string + authentication mode (device, secret, cert, refresh) (default "device") + -resource string + resource for which the token is requested + -secret string + application secret + -tenantId string + tenant id + -tokenCachePath string + location of oath token cache (default "/home/cgc/.adal/accessToken.json") +``` + +Example acquire a token for `https://management.core.windows.net/` using device code flow: + +``` +adal -mode device \ + -applicationId "APPLICATION_ID" \ + -tenantId "TENANT_ID" \ + -resource https://management.core.windows.net/ + +``` diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go new file mode 100644 index 00000000..f570d540 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go @@ -0,0 +1,81 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "net/url" +) + +const ( + activeDirectoryAPIVersion = "1.0" +) + +// OAuthConfig represents the endpoints needed +// in OAuth operations +type OAuthConfig struct { + AuthorityEndpoint url.URL + AuthorizeEndpoint url.URL + TokenEndpoint url.URL + DeviceCodeEndpoint url.URL +} + +// IsZero returns true if the OAuthConfig object is zero-initialized. 
+func (oac OAuthConfig) IsZero() bool { + return oac == OAuthConfig{} +} + +func validateStringParam(param, name string) error { + if len(param) == 0 { + return fmt.Errorf("parameter '" + name + "' cannot be empty") + } + return nil +} + +// NewOAuthConfig returns an OAuthConfig with tenant specific urls +func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) { + if err := validateStringParam(activeDirectoryEndpoint, "activeDirectoryEndpoint"); err != nil { + return nil, err + } + // it's legal for tenantID to be empty so don't validate it + const activeDirectoryEndpointTemplate = "%s/oauth2/%s?api-version=%s" + u, err := url.Parse(activeDirectoryEndpoint) + if err != nil { + return nil, err + } + authorityURL, err := u.Parse(tenantID) + if err != nil { + return nil, err + } + authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", activeDirectoryAPIVersion)) + if err != nil { + return nil, err + } + tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", activeDirectoryAPIVersion)) + if err != nil { + return nil, err + } + deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", activeDirectoryAPIVersion)) + if err != nil { + return nil, err + } + + return &OAuthConfig{ + AuthorityEndpoint: *authorityURL, + AuthorizeEndpoint: *authorizeURL, + TokenEndpoint: *tokenURL, + DeviceCodeEndpoint: *deviceCodeURL, + }, nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/devicetoken.go b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go similarity index 59% rename from vendor/github.com/Azure/go-autorest/autorest/azure/devicetoken.go rename to vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go index e1d5498a..b38f4c24 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/devicetoken.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go @@ -1,4 +1,18 @@ -package azure +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
/* This file is largely based on rjw57/oauth2device's code, with the follow differences: @@ -10,16 +24,17 @@ package azure */ import ( + "encoding/json" "fmt" + "io/ioutil" "net/http" "net/url" + "strings" "time" - - "github.com/Azure/go-autorest/autorest" ) const ( - logPrefix = "autorest/azure/devicetoken:" + logPrefix = "autorest/adal/devicetoken:" ) var ( @@ -38,10 +53,17 @@ var ( // ErrDeviceSlowDown represents the service telling us we're polling too often during device flow ErrDeviceSlowDown = fmt.Errorf("%s Error while retrieving OAuth token: Slow Down", logPrefix) + // ErrDeviceCodeEmpty represents an empty device code from the device endpoint while using device flow + ErrDeviceCodeEmpty = fmt.Errorf("%s Error while retrieving device code: Device Code Empty", logPrefix) + + // ErrOAuthTokenEmpty represents an empty OAuth token from the token endpoint when using device flow + ErrOAuthTokenEmpty = fmt.Errorf("%s Error while retrieving OAuth token: Token Empty", logPrefix) + errCodeSendingFails = "Error occurred while sending request for Device Authorization Code" errCodeHandlingFails = "Error occurred while handling response from the Device Endpoint" errTokenSendingFails = "Error occurred while sending request with device code for a token" errTokenHandlingFails = "Error occurred while handling response from the Token Endpoint (during device flow)" + errStatusNotOK = "Error HTTP status != 200" ) // DeviceCode is the object returned by the device auth endpoint @@ -79,31 +101,45 @@ type deviceToken struct { // InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode // that can be used with CheckForUserCompletion or WaitForUserCompletion. -func InitiateDeviceAuth(client *autorest.Client, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) { - req, _ := autorest.Prepare( - &http.Request{}, - autorest.AsPost(), - autorest.AsFormURLEncoded(), - autorest.WithBaseURL(oauthConfig.DeviceCodeEndpoint.String()), - autorest.WithFormData(url.Values{ - "client_id": []string{clientID}, - "resource": []string{resource}, - }), - ) +func InitiateDeviceAuth(sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) { + v := url.Values{ + "client_id": []string{clientID}, + "resource": []string{resource}, + } - resp, err := autorest.SendWithSender(client, req) + s := v.Encode() + body := ioutil.NopCloser(strings.NewReader(s)) + + req, err := http.NewRequest(http.MethodPost, oauthConfig.DeviceCodeEndpoint.String(), body) if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err) + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) + } + + req.ContentLength = int64(len(s)) + req.Header.Set(contentType, mimeTypeFormPost) + resp, err := sender.Do(req) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) + } + defer resp.Body.Close() + + rb, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, errStatusNotOK) + } + + if len(strings.Trim(string(rb), " ")) == 0 { + return nil, ErrDeviceCodeEmpty } var code DeviceCode - err = autorest.Respond( - resp, - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&code), - autorest.ByClosing()) + err = json.Unmarshal(rb, &code) if err != nil { - return nil, fmt.Errorf("%s 
%s: %s", logPrefix, errCodeHandlingFails, err) + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) } code.ClientID = clientID @@ -115,33 +151,46 @@ func InitiateDeviceAuth(client *autorest.Client, oauthConfig OAuthConfig, client // CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint // to see if the device flow has: been completed, timed out, or otherwise failed -func CheckForUserCompletion(client *autorest.Client, code *DeviceCode) (*Token, error) { - req, _ := autorest.Prepare( - &http.Request{}, - autorest.AsPost(), - autorest.AsFormURLEncoded(), - autorest.WithBaseURL(code.OAuthConfig.TokenEndpoint.String()), - autorest.WithFormData(url.Values{ - "client_id": []string{code.ClientID}, - "code": []string{*code.DeviceCode}, - "grant_type": []string{OAuthGrantTypeDeviceCode}, - "resource": []string{code.Resource}, - }), - ) +func CheckForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { + v := url.Values{ + "client_id": []string{code.ClientID}, + "code": []string{*code.DeviceCode}, + "grant_type": []string{OAuthGrantTypeDeviceCode}, + "resource": []string{code.Resource}, + } - resp, err := autorest.SendWithSender(client, req) + s := v.Encode() + body := ioutil.NopCloser(strings.NewReader(s)) + + req, err := http.NewRequest(http.MethodPost, code.OAuthConfig.TokenEndpoint.String(), body) if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err) + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) + } + + req.ContentLength = int64(len(s)) + req.Header.Set(contentType, mimeTypeFormPost) + resp, err := sender.Do(req) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) + } + defer resp.Body.Close() + + rb, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) + } + + if resp.StatusCode != http.StatusOK && len(strings.Trim(string(rb), " ")) == 0 { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, errStatusNotOK) + } + if len(strings.Trim(string(rb), " ")) == 0 { + return nil, ErrOAuthTokenEmpty } var token deviceToken - err = autorest.Respond( - resp, - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusBadRequest), - autorest.ByUnmarshallingJSON(&token), - autorest.ByClosing()) + err = json.Unmarshal(rb, &token) if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err) + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) } if token.Error == nil { @@ -164,12 +213,12 @@ func CheckForUserCompletion(client *autorest.Client, code *DeviceCode) (*Token, // WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs. // This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'. 
-func WaitForUserCompletion(client *autorest.Client, code *DeviceCode) (*Token, error) { +func WaitForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { intervalDuration := time.Duration(*code.Interval) * time.Second waitDuration := intervalDuration for { - token, err := CheckForUserCompletion(client, code) + token, err := CheckForUserCompletion(sender, code) if err == nil { return token, nil diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/persist.go b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go similarity index 74% rename from vendor/github.com/Azure/go-autorest/autorest/azure/persist.go rename to vendor/github.com/Azure/go-autorest/autorest/adal/persist.go index d5cf62dd..9e15f275 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/persist.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go @@ -1,4 +1,18 @@ -package azure +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. import ( "encoding/json" diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go new file mode 100644 index 00000000..0e5ad14d --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go @@ -0,0 +1,60 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "net/http" +) + +const ( + contentType = "Content-Type" + mimeTypeFormPost = "application/x-www-form-urlencoded" +) + +// Sender is the interface that wraps the Do method to send HTTP requests. +// +// The standard http.Client conforms to this interface. +type Sender interface { + Do(*http.Request) (*http.Response, error) +} + +// SenderFunc is a method that implements the Sender interface. +type SenderFunc func(*http.Request) (*http.Response, error) + +// Do implements the Sender interface on SenderFunc. +func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) { + return sf(r) +} + +// SendDecorator takes and possibily decorates, by wrapping, a Sender. Decorators may affect the +// http.Request and pass it along or, first, pass the http.Request along then react to the +// http.Response result. +type SendDecorator func(Sender) Sender + +// CreateSender creates, decorates, and returns, as a Sender, the default http.Client. +func CreateSender(decorators ...SendDecorator) Sender { + return DecorateSender(&http.Client{}, decorators...) 
+} + +// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which is applies to +// the Sender. Decorators are applied in the order received, but their affect upon the request +// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a +// post-decorator (pass the http.Request along and react to the results in http.Response). +func DecorateSender(s Sender, decorators ...SendDecorator) Sender { + for _, decorate := range decorators { + s = decorate(s) + } + return s +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go new file mode 100644 index 00000000..67c5a0b0 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go @@ -0,0 +1,795 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "context" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/x509" + "encoding/base64" + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "strconv" + "strings" + "sync" + "time" + + "github.com/Azure/go-autorest/autorest/date" + "github.com/dgrijalva/jwt-go" +) + +const ( + defaultRefresh = 5 * time.Minute + + // OAuthGrantTypeDeviceCode is the "grant_type" identifier used in device flow + OAuthGrantTypeDeviceCode = "device_code" + + // OAuthGrantTypeClientCredentials is the "grant_type" identifier used in credential flows + OAuthGrantTypeClientCredentials = "client_credentials" + + // OAuthGrantTypeUserPass is the "grant_type" identifier used in username and password auth flows + OAuthGrantTypeUserPass = "password" + + // OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows + OAuthGrantTypeRefreshToken = "refresh_token" + + // OAuthGrantTypeAuthorizationCode is the "grant_type" identifier used in authorization code flows + OAuthGrantTypeAuthorizationCode = "authorization_code" + + // metadataHeader is the header required by MSI extension + metadataHeader = "Metadata" + + // msiEndpoint is the well known endpoint for getting MSI authentications tokens + msiEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token" +) + +// OAuthTokenProvider is an interface which should be implemented by an access token retriever +type OAuthTokenProvider interface { + OAuthToken() string +} + +// TokenRefreshError is an interface used by errors returned during token refresh. 
+type TokenRefreshError interface {
+	error
+	Response() *http.Response
+}
+
+// Refresher is an interface for token refresh functionality
+type Refresher interface {
+	Refresh() error
+	RefreshExchange(resource string) error
+	EnsureFresh() error
+}
+
+// RefresherWithContext is an interface for token refresh functionality
+type RefresherWithContext interface {
+	RefreshWithContext(ctx context.Context) error
+	RefreshExchangeWithContext(ctx context.Context, resource string) error
+	EnsureFreshWithContext(ctx context.Context) error
+}
+
+// TokenRefreshCallback is the type representing callbacks that will be called after
+// a successful token refresh
+type TokenRefreshCallback func(Token) error
+
+// Token encapsulates the access token used to authorize Azure requests.
+type Token struct {
+	AccessToken  string `json:"access_token"`
+	RefreshToken string `json:"refresh_token"`
+
+	ExpiresIn string `json:"expires_in"`
+	ExpiresOn string `json:"expires_on"`
+	NotBefore string `json:"not_before"`
+
+	Resource string `json:"resource"`
+	Type     string `json:"token_type"`
+}
+
+// IsZero returns true if the token object is zero-initialized.
+func (t Token) IsZero() bool {
+	return t == Token{}
+}
+
+// Expires returns the time.Time when the Token expires.
+func (t Token) Expires() time.Time {
+	s, err := strconv.Atoi(t.ExpiresOn)
+	if err != nil {
+		s = -3600
+	}
+
+	expiration := date.NewUnixTimeFromSeconds(float64(s))
+
+	return time.Time(expiration).UTC()
+}
+
+// IsExpired returns true if the Token is expired, false otherwise.
+func (t Token) IsExpired() bool {
+	return t.WillExpireIn(0)
+}
+
+// WillExpireIn returns true if the Token will expire after the passed time.Duration interval
+// from now, false otherwise.
+func (t Token) WillExpireIn(d time.Duration) bool {
+	return !t.Expires().After(time.Now().Add(d))
+}
+
+// OAuthToken returns the current access token.
+func (t *Token) OAuthToken() string {
+	return t.AccessToken
+}
+
+// ServicePrincipalNoSecret represents a secret type that contains no secret,
+// meaning it is not valid for fetching a fresh token. It is used by tokens created
+// from a manually supplied token (see NewServicePrincipalTokenFromManualToken).
+type ServicePrincipalNoSecret struct {
+}
+
+// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
+// It only returns an error for the ServicePrincipalNoSecret type
+func (noSecret *ServicePrincipalNoSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
+	return fmt.Errorf("Manually created ServicePrincipalToken does not contain secret material to retrieve a new access token")
+}
+
+// ServicePrincipalSecret is an interface that allows various secret mechanisms to fill the form
+// that is submitted when acquiring an oAuth token.
+type ServicePrincipalSecret interface {
+	SetAuthenticationValues(spt *ServicePrincipalToken, values *url.Values) error
+}
+
+// ServicePrincipalTokenSecret implements ServicePrincipalSecret for client_secret type authorization.
+type ServicePrincipalTokenSecret struct {
+	ClientSecret string
+}
+
+// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
+// It will populate the form submitted during oAuth Token Acquisition using the client_secret.
+func (tokenSecret *ServicePrincipalTokenSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
+	v.Set("client_secret", tokenSecret.ClientSecret)
+	return nil
+}
+
+// ServicePrincipalCertificateSecret implements ServicePrincipalSecret for generic RSA cert auth with signed JWTs.
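+// The signed JWT is submitted as the "client_assertion" form value with the standard
+// "urn:ietf:params:oauth:client-assertion-type:jwt-bearer" assertion type, and carries
+// the certificate's SHA-1 thumbprint in its "x5t" header (see SignJwt below).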
+type ServicePrincipalCertificateSecret struct { + Certificate *x509.Certificate + PrivateKey *rsa.PrivateKey +} + +// ServicePrincipalMSISecret implements ServicePrincipalSecret for machines running the MSI Extension. +type ServicePrincipalMSISecret struct { +} + +// ServicePrincipalUsernamePasswordSecret implements ServicePrincipalSecret for username and password auth. +type ServicePrincipalUsernamePasswordSecret struct { + Username string + Password string +} + +// ServicePrincipalAuthorizationCodeSecret implements ServicePrincipalSecret for authorization code auth. +type ServicePrincipalAuthorizationCodeSecret struct { + ClientSecret string + AuthorizationCode string + RedirectURI string +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +func (secret *ServicePrincipalAuthorizationCodeSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + v.Set("code", secret.AuthorizationCode) + v.Set("client_secret", secret.ClientSecret) + v.Set("redirect_uri", secret.RedirectURI) + return nil +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +func (secret *ServicePrincipalUsernamePasswordSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + v.Set("username", secret.Username) + v.Set("password", secret.Password) + return nil +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +func (msiSecret *ServicePrincipalMSISecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + return nil +} + +// SignJwt returns the JWT signed with the certificate's private key. +func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalToken) (string, error) { + hasher := sha1.New() + _, err := hasher.Write(secret.Certificate.Raw) + if err != nil { + return "", err + } + + thumbprint := base64.URLEncoding.EncodeToString(hasher.Sum(nil)) + + // The jti (JWT ID) claim provides a unique identifier for the JWT. + jti := make([]byte, 20) + _, err = rand.Read(jti) + if err != nil { + return "", err + } + + token := jwt.New(jwt.SigningMethodRS256) + token.Header["x5t"] = thumbprint + token.Claims = jwt.MapClaims{ + "aud": spt.oauthConfig.TokenEndpoint.String(), + "iss": spt.clientID, + "sub": spt.clientID, + "jti": base64.URLEncoding.EncodeToString(jti), + "nbf": time.Now().Unix(), + "exp": time.Now().Add(time.Hour * 24).Unix(), + } + + signedString, err := token.SignedString(secret.PrivateKey) + return signedString, err +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +// It will populate the form submitted during oAuth Token Acquisition using a JWT signed with a certificate. +func (secret *ServicePrincipalCertificateSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + jwt, err := secret.SignJwt(spt) + if err != nil { + return err + } + + v.Set("client_assertion", jwt) + v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer") + return nil +} + +// ServicePrincipalToken encapsulates a Token created for a Service Principal. 
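+// A minimal usage sketch; the endpoint, tenant, and credential values below are
+// placeholders, not values defined by this package:
+//
+//	cfg, err := NewOAuthConfig("https://login.microsoftonline.com/", "my-tenant-id")
+//	if err == nil {
+//		spt, _ := NewServicePrincipalToken(*cfg, "client-id", "client-secret", "https://management.azure.com/")
+//		_ = spt.EnsureFresh() // refreshes only when inside the expiry window
+//	}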
+type ServicePrincipalToken struct {
+	token         Token
+	secret        ServicePrincipalSecret
+	oauthConfig   OAuthConfig
+	clientID      string
+	resource      string
+	autoRefresh   bool
+	refreshLock   *sync.RWMutex
+	refreshWithin time.Duration
+	sender        Sender
+
+	refreshCallbacks []TokenRefreshCallback
+}
+
+func validateOAuthConfig(oac OAuthConfig) error {
+	if oac.IsZero() {
+		return fmt.Errorf("parameter 'oauthConfig' cannot be zero-initialized")
+	}
+	return nil
+}
+
+// NewServicePrincipalTokenWithSecret creates a ServicePrincipalToken using the supplied ServicePrincipalSecret implementation.
+func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, resource string, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	if err := validateOAuthConfig(oauthConfig); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(id, "id"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(resource, "resource"); err != nil {
+		return nil, err
+	}
+	if secret == nil {
+		return nil, fmt.Errorf("parameter 'secret' cannot be nil")
+	}
+	spt := &ServicePrincipalToken{
+		oauthConfig:      oauthConfig,
+		secret:           secret,
+		clientID:         id,
+		resource:         resource,
+		autoRefresh:      true,
+		refreshLock:      &sync.RWMutex{},
+		refreshWithin:    defaultRefresh,
+		sender:           &http.Client{},
+		refreshCallbacks: callbacks,
+	}
+	return spt, nil
+}
+
+// NewServicePrincipalTokenFromManualToken creates a ServicePrincipalToken using the supplied token
+func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID string, resource string, token Token, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	if err := validateOAuthConfig(oauthConfig); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(clientID, "clientID"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(resource, "resource"); err != nil {
+		return nil, err
+	}
+	if token.IsZero() {
+		return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized")
+	}
+	spt, err := NewServicePrincipalTokenWithSecret(
+		oauthConfig,
+		clientID,
+		resource,
+		&ServicePrincipalNoSecret{},
+		callbacks...)
+	if err != nil {
+		return nil, err
+	}
+
+	spt.token = token
+
+	return spt, nil
+}
+
+// NewServicePrincipalToken creates a ServicePrincipalToken from the supplied Service Principal
+// credentials scoped to the named resource.
+func NewServicePrincipalToken(oauthConfig OAuthConfig, clientID string, secret string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	if err := validateOAuthConfig(oauthConfig); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(clientID, "clientID"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(secret, "secret"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(resource, "resource"); err != nil {
+		return nil, err
+	}
+	return NewServicePrincipalTokenWithSecret(
+		oauthConfig,
+		clientID,
+		resource,
+		&ServicePrincipalTokenSecret{
+			ClientSecret: secret,
+		},
+		callbacks...,
+	)
+}
+
+// NewServicePrincipalTokenFromCertificate creates a ServicePrincipalToken from the supplied
+// certificate and RSA private key.
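+// Callers holding a raw pkcs12 blob are expected to decode it into these two values
+// themselves (for example with golang.org/x/crypto/pkcs12, which is an assumption
+// about the caller's tooling, not a dependency of this package).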
+func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	if err := validateOAuthConfig(oauthConfig); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(clientID, "clientID"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(resource, "resource"); err != nil {
+		return nil, err
+	}
+	if certificate == nil {
+		return nil, fmt.Errorf("parameter 'certificate' cannot be nil")
+	}
+	if privateKey == nil {
+		return nil, fmt.Errorf("parameter 'privateKey' cannot be nil")
+	}
+	return NewServicePrincipalTokenWithSecret(
+		oauthConfig,
+		clientID,
+		resource,
+		&ServicePrincipalCertificateSecret{
+			PrivateKey:  privateKey,
+			Certificate: certificate,
+		},
+		callbacks...,
+	)
+}
+
+// NewServicePrincipalTokenFromUsernamePassword creates a ServicePrincipalToken from the username and password.
+func NewServicePrincipalTokenFromUsernamePassword(oauthConfig OAuthConfig, clientID string, username string, password string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	if err := validateOAuthConfig(oauthConfig); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(clientID, "clientID"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(username, "username"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(password, "password"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(resource, "resource"); err != nil {
+		return nil, err
+	}
+	return NewServicePrincipalTokenWithSecret(
+		oauthConfig,
+		clientID,
+		resource,
+		&ServicePrincipalUsernamePasswordSecret{
+			Username: username,
+			Password: password,
+		},
+		callbacks...,
+	)
+}
+
+// NewServicePrincipalTokenFromAuthorizationCode creates a ServicePrincipalToken from the supplied
+// authorization code, client secret, and redirect URI.
+func NewServicePrincipalTokenFromAuthorizationCode(oauthConfig OAuthConfig, clientID string, clientSecret string, authorizationCode string, redirectURI string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+
+	if err := validateOAuthConfig(oauthConfig); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(clientID, "clientID"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(clientSecret, "clientSecret"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(authorizationCode, "authorizationCode"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(redirectURI, "redirectURI"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(resource, "resource"); err != nil {
+		return nil, err
+	}
+
+	return NewServicePrincipalTokenWithSecret(
+		oauthConfig,
+		clientID,
+		resource,
+		&ServicePrincipalAuthorizationCodeSecret{
+			ClientSecret:      clientSecret,
+			AuthorizationCode: authorizationCode,
+			RedirectURI:       redirectURI,
+		},
+		callbacks...,
+	)
+}
+
+// GetMSIVMEndpoint gets the MSI endpoint on Virtual Machines.
+func GetMSIVMEndpoint() (string, error) {
+	return msiEndpoint, nil
+}
+
+// NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension.
+// It will use the system assigned identity when creating the token.
+func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	return newServicePrincipalTokenFromMSI(msiEndpoint, resource, nil, callbacks...)
+} + +// NewServicePrincipalTokenFromMSIWithUserAssignedID creates a ServicePrincipalToken via the MSI VM Extension. +// It will use the specified user assigned identity when creating the token. +func NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, resource string, userAssignedID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + return newServicePrincipalTokenFromMSI(msiEndpoint, resource, &userAssignedID, callbacks...) +} + +func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedID *string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateStringParam(msiEndpoint, "msiEndpoint"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if userAssignedID != nil { + if err := validateStringParam(*userAssignedID, "userAssignedID"); err != nil { + return nil, err + } + } + // We set the oauth config token endpoint to be MSI's endpoint + msiEndpointURL, err := url.Parse(msiEndpoint) + if err != nil { + return nil, err + } + + v := url.Values{} + v.Set("resource", resource) + v.Set("api-version", "2018-02-01") + if userAssignedID != nil { + v.Set("client_id", *userAssignedID) + } + msiEndpointURL.RawQuery = v.Encode() + + spt := &ServicePrincipalToken{ + oauthConfig: OAuthConfig{ + TokenEndpoint: *msiEndpointURL, + }, + secret: &ServicePrincipalMSISecret{}, + resource: resource, + autoRefresh: true, + refreshLock: &sync.RWMutex{}, + refreshWithin: defaultRefresh, + sender: &http.Client{}, + refreshCallbacks: callbacks, + } + + if userAssignedID != nil { + spt.clientID = *userAssignedID + } + + return spt, nil +} + +// internal type that implements TokenRefreshError +type tokenRefreshError struct { + message string + resp *http.Response +} + +// Error implements the error interface which is part of the TokenRefreshError interface. +func (tre tokenRefreshError) Error() string { + return tre.message +} + +// Response implements the TokenRefreshError interface, it returns the raw HTTP response from the refresh operation. +func (tre tokenRefreshError) Response() *http.Response { + return tre.resp +} + +func newTokenRefreshError(message string, resp *http.Response) TokenRefreshError { + return tokenRefreshError{message: message, resp: resp} +} + +// EnsureFresh will refresh the token if it will expire within the refresh window (as set by +// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. +func (spt *ServicePrincipalToken) EnsureFresh() error { + return spt.EnsureFreshWithContext(context.Background()) +} + +// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by +// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. 
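+// The expiry window is re-checked after the write lock is acquired, so concurrent
+// callers trigger at most one refresh between them.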
+func (spt *ServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error {
+	if spt.autoRefresh && spt.token.WillExpireIn(spt.refreshWithin) {
+		// take the write lock then check to see if the token was already refreshed
+		spt.refreshLock.Lock()
+		defer spt.refreshLock.Unlock()
+		if spt.token.WillExpireIn(spt.refreshWithin) {
+			return spt.refreshInternal(ctx, spt.resource)
+		}
+	}
+	return nil
+}
+
+// InvokeRefreshCallbacks calls any TokenRefreshCallbacks that were added to the SPT during initialization
+func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error {
+	if spt.refreshCallbacks != nil {
+		for _, callback := range spt.refreshCallbacks {
+			err := callback(spt.token)
+			if err != nil {
+				return fmt.Errorf("adal: TokenRefreshCallback handler failed. Error = '%v'", err)
+			}
+		}
+	}
+	return nil
+}
+
+// Refresh obtains a fresh token for the Service Principal.
+// This method is not safe for concurrent use and should be synchronized.
+func (spt *ServicePrincipalToken) Refresh() error {
+	return spt.RefreshWithContext(context.Background())
+}
+
+// RefreshWithContext obtains a fresh token for the Service Principal.
+// This method is not safe for concurrent use and should be synchronized.
+func (spt *ServicePrincipalToken) RefreshWithContext(ctx context.Context) error {
+	spt.refreshLock.Lock()
+	defer spt.refreshLock.Unlock()
+	return spt.refreshInternal(ctx, spt.resource)
+}
+
+// RefreshExchange refreshes the token, but for a different resource.
+// This method is not safe for concurrent use and should be synchronized.
+func (spt *ServicePrincipalToken) RefreshExchange(resource string) error {
+	return spt.RefreshExchangeWithContext(context.Background(), resource)
+}
+
+// RefreshExchangeWithContext refreshes the token, but for a different resource.
+// This method is not safe for concurrent use and should be synchronized.
+func (spt *ServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error {
+	spt.refreshLock.Lock()
+	defer spt.refreshLock.Unlock()
+	return spt.refreshInternal(ctx, resource)
+}
+
+func (spt *ServicePrincipalToken) getGrantType() string {
+	switch spt.secret.(type) {
+	case *ServicePrincipalUsernamePasswordSecret:
+		return OAuthGrantTypeUserPass
+	case *ServicePrincipalAuthorizationCodeSecret:
+		return OAuthGrantTypeAuthorizationCode
+	default:
+		return OAuthGrantTypeClientCredentials
+	}
+}
+
+func isIMDS(u url.URL) bool {
+	imds, err := url.Parse(msiEndpoint)
+	if err != nil {
+		return false
+	}
+	return u.Host == imds.Host && u.Path == imds.Path
+}
+
+func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource string) error {
+	req, err := http.NewRequest(http.MethodPost, spt.oauthConfig.TokenEndpoint.String(), nil)
+	if err != nil {
+		return fmt.Errorf("adal: Failed to build the refresh request. 
Error = '%v'", err) + } + req = req.WithContext(ctx) + if !isIMDS(spt.oauthConfig.TokenEndpoint) { + v := url.Values{} + v.Set("client_id", spt.clientID) + v.Set("resource", resource) + + if spt.token.RefreshToken != "" { + v.Set("grant_type", OAuthGrantTypeRefreshToken) + v.Set("refresh_token", spt.token.RefreshToken) + } else { + v.Set("grant_type", spt.getGrantType()) + err := spt.secret.SetAuthenticationValues(spt, &v) + if err != nil { + return err + } + } + + s := v.Encode() + body := ioutil.NopCloser(strings.NewReader(s)) + req.ContentLength = int64(len(s)) + req.Header.Set(contentType, mimeTypeFormPost) + req.Body = body + } + + if _, ok := spt.secret.(*ServicePrincipalMSISecret); ok { + req.Method = http.MethodGet + req.Header.Set(metadataHeader, "true") + } + + var resp *http.Response + if isIMDS(spt.oauthConfig.TokenEndpoint) { + resp, err = retry(spt.sender, req) + } else { + resp, err = spt.sender.Do(req) + } + if err != nil { + return newTokenRefreshError(fmt.Sprintf("adal: Failed to execute the refresh request. Error = '%v'", err), nil) + } + + defer resp.Body.Close() + rb, err := ioutil.ReadAll(resp.Body) + + if resp.StatusCode != http.StatusOK { + if err != nil { + return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Failed reading response body: %v", resp.StatusCode, err), resp) + } + return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Response body: %s", resp.StatusCode, string(rb)), resp) + } + + // for the following error cases don't return a TokenRefreshError. the operation succeeded + // but some transient failure happened during deserialization. by returning a generic error + // the retry logic will kick in (we don't retry on TokenRefreshError). + + if err != nil { + return fmt.Errorf("adal: Failed to read a new service principal token during refresh. Error = '%v'", err) + } + if len(strings.Trim(string(rb), " ")) == 0 { + return fmt.Errorf("adal: Empty service principal token received during refresh") + } + var token Token + err = json.Unmarshal(rb, &token) + if err != nil { + return fmt.Errorf("adal: Failed to unmarshal the service principal token during refresh. Error = '%v' JSON = '%s'", err, string(rb)) + } + + spt.token = token + + return spt.InvokeRefreshCallbacks(token) +} + +func retry(sender Sender, req *http.Request) (resp *http.Response, err error) { + retries := []int{ + http.StatusRequestTimeout, // 408 + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + } + // Extra retry status codes requered + retries = append(retries, http.StatusNotFound, + // all remaining 5xx + http.StatusNotImplemented, + http.StatusHTTPVersionNotSupported, + http.StatusVariantAlsoNegotiates, + http.StatusInsufficientStorage, + http.StatusLoopDetected, + http.StatusNotExtended, + http.StatusNetworkAuthenticationRequired) + + attempt := 0 + maxAttempts := 5 + + for attempt < maxAttempts { + resp, err = sender.Do(req) + // retry on temporary network errors, e.g. transient network failures. 
+		if (err != nil && !isTemporaryNetworkError(err)) || (err == nil && (resp.StatusCode == http.StatusOK || !containsInt(retries, resp.StatusCode))) {
+			return
+		}
+
+		if !delay(resp, req.Context().Done()) {
+			select {
+			case <-time.After(time.Second):
+				attempt++
+			case <-req.Context().Done():
+				err = req.Context().Err()
+				return
+			}
+		}
+	}
+	return
+}
+
+func isTemporaryNetworkError(err error) bool {
+	if netErr, ok := err.(net.Error); ok && netErr.Temporary() {
+		return true
+	}
+	return false
+}
+
+func containsInt(ints []int, n int) bool {
+	for _, i := range ints {
+		if i == n {
+			return true
+		}
+	}
+	return false
+}
+
+func delay(resp *http.Response, cancel <-chan struct{}) bool {
+	if resp == nil {
+		return false
+	}
+	retryAfter, _ := strconv.Atoi(resp.Header.Get("Retry-After"))
+	if resp.StatusCode == http.StatusTooManyRequests && retryAfter > 0 {
+		select {
+		case <-time.After(time.Duration(retryAfter) * time.Second):
+			return true
+		case <-cancel:
+			return false
+		}
+	}
+	return false
+}
+
+// SetAutoRefresh enables or disables automatic refreshing of stale tokens.
+func (spt *ServicePrincipalToken) SetAutoRefresh(autoRefresh bool) {
+	spt.autoRefresh = autoRefresh
+}
+
+// SetRefreshWithin sets the interval within which if the token will expire, EnsureFresh will
+// refresh the token.
+func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) {
+	spt.refreshWithin = d
+	return
+}
+
+// SetSender sets the Sender used when obtaining the Service Principal token. An
+// undecorated http.Client is used by default.
+func (spt *ServicePrincipalToken) SetSender(s Sender) { spt.sender = s }
+
+// OAuthToken implements the OAuthTokenProvider interface. It returns the current access token.
+func (spt *ServicePrincipalToken) OAuthToken() string {
+	spt.refreshLock.RLock()
+	defer spt.refreshLock.RUnlock()
+	return spt.token.OAuthToken()
+}
+
+// Token returns a copy of the current token.
+func (spt *ServicePrincipalToken) Token() Token {
+	spt.refreshLock.RLock()
+	defer spt.refreshLock.RUnlock()
+	return spt.token
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization.go b/vendor/github.com/Azure/go-autorest/autorest/authorization.go
new file mode 100644
index 00000000..77eff45b
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/authorization.go
@@ -0,0 +1,259 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/Azure/go-autorest/autorest/adal"
+)
+
+const (
+	bearerChallengeHeader       = "Www-Authenticate"
+	bearer                      = "Bearer"
+	tenantID                    = "tenantID"
+	apiKeyAuthorizerHeader      = "Ocp-Apim-Subscription-Key"
+	bingAPISdkHeader            = "X-BingApis-SDK-Client"
+	golangBingAPISdkHeaderValue = "Go-SDK"
+)
+
+// Authorizer is the interface that provides a PrepareDecorator used to supply request
+// authorization. Most often, the Authorizer decorator runs last so it has access to the full
+// state of the formed HTTP request.
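+// Both token-based schemes (BearerAuthorizer, backed by adal) and key-based schemes
+// (APIKeyAuthorizer, CognitiveServicesAuthorizer, EventGridKeyAuthorizer) below adapt
+// their credentials to this single interface.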
+type Authorizer interface {
+	WithAuthorization() PrepareDecorator
+}
+
+// NullAuthorizer implements a default, "do nothing" Authorizer.
+type NullAuthorizer struct{}
+
+// WithAuthorization returns a PrepareDecorator that does nothing.
+func (na NullAuthorizer) WithAuthorization() PrepareDecorator {
+	return WithNothing()
+}
+
+// APIKeyAuthorizer implements API Key authorization.
+type APIKeyAuthorizer struct {
+	headers         map[string]interface{}
+	queryParameters map[string]interface{}
+}
+
+// NewAPIKeyAuthorizerWithHeaders creates an APIKeyAuthorizer with headers.
+func NewAPIKeyAuthorizerWithHeaders(headers map[string]interface{}) *APIKeyAuthorizer {
+	return NewAPIKeyAuthorizer(headers, nil)
+}
+
+// NewAPIKeyAuthorizerWithQueryParameters creates an APIKeyAuthorizer with query parameters.
+func NewAPIKeyAuthorizerWithQueryParameters(queryParameters map[string]interface{}) *APIKeyAuthorizer {
+	return NewAPIKeyAuthorizer(nil, queryParameters)
+}
+
+// NewAPIKeyAuthorizer creates an APIKeyAuthorizer with headers and query parameters.
+func NewAPIKeyAuthorizer(headers map[string]interface{}, queryParameters map[string]interface{}) *APIKeyAuthorizer {
+	return &APIKeyAuthorizer{headers: headers, queryParameters: queryParameters}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds HTTP headers and query parameters.
+func (aka *APIKeyAuthorizer) WithAuthorization() PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return DecoratePreparer(p, WithHeaders(aka.headers), WithQueryParameters(aka.queryParameters))
+	}
+}
+
+// CognitiveServicesAuthorizer implements authorization for Cognitive Services.
+type CognitiveServicesAuthorizer struct {
+	subscriptionKey string
+}
+
+// NewCognitiveServicesAuthorizer creates a CognitiveServicesAuthorizer using the given subscription key.
+func NewCognitiveServicesAuthorizer(subscriptionKey string) *CognitiveServicesAuthorizer {
+	return &CognitiveServicesAuthorizer{subscriptionKey: subscriptionKey}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds the Cognitive Services
+// subscription-key and Bing SDK headers.
+func (csa *CognitiveServicesAuthorizer) WithAuthorization() PrepareDecorator {
+	headers := make(map[string]interface{})
+	headers[apiKeyAuthorizerHeader] = csa.subscriptionKey
+	headers[bingAPISdkHeader] = golangBingAPISdkHeaderValue
+
+	return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
+}
+
+// BearerAuthorizer implements the bearer authorization scheme.
+type BearerAuthorizer struct {
+	tokenProvider adal.OAuthTokenProvider
+}
+
+// NewBearerAuthorizer creates a BearerAuthorizer using the given token provider.
+func NewBearerAuthorizer(tp adal.OAuthTokenProvider) *BearerAuthorizer {
+	return &BearerAuthorizer{tokenProvider: tp}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
+// value is "Bearer " followed by the token.
+//
+// By default, the token will be automatically refreshed through the Refresher interface.
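+// A hedged usage sketch; "example-agent" is an arbitrary placeholder and spt stands
+// for an *adal.ServicePrincipalToken obtained elsewhere:
+//
+//	client := NewClientWithUserAgent("example-agent")
+//	client.Authorizer = NewBearerAuthorizer(spt)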
+func (ba *BearerAuthorizer) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + // the ordering is important here, prefer RefresherWithContext if available + if refresher, ok := ba.tokenProvider.(adal.RefresherWithContext); ok { + err = refresher.EnsureFreshWithContext(r.Context()) + } else if refresher, ok := ba.tokenProvider.(adal.Refresher); ok { + err = refresher.EnsureFresh() + } + if err != nil { + var resp *http.Response + if tokError, ok := err.(adal.TokenRefreshError); ok { + resp = tokError.Response() + } + return r, NewErrorWithError(err, "azure.BearerAuthorizer", "WithAuthorization", resp, + "Failed to refresh the Token for request to %s", r.URL) + } + return Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", ba.tokenProvider.OAuthToken()))) + } + return r, err + }) + } +} + +// BearerAuthorizerCallbackFunc is the authentication callback signature. +type BearerAuthorizerCallbackFunc func(tenantID, resource string) (*BearerAuthorizer, error) + +// BearerAuthorizerCallback implements bearer authorization via a callback. +type BearerAuthorizerCallback struct { + sender Sender + callback BearerAuthorizerCallbackFunc +} + +// NewBearerAuthorizerCallback creates a bearer authorization callback. The callback +// is invoked when the HTTP request is submitted. +func NewBearerAuthorizerCallback(sender Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback { + if sender == nil { + sender = &http.Client{} + } + return &BearerAuthorizerCallback{sender: sender, callback: callback} +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose value +// is "Bearer " followed by the token. The BearerAuthorizer is obtained via a user-supplied callback. +// +// By default, the token will be automatically refreshed through the Refresher interface. +func (bacb *BearerAuthorizerCallback) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + // make a copy of the request and remove the body as it's not + // required and avoids us having to create a copy of it. 
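+				// note that this is a shallow copy of the request; removeRequestBody
+				// (defined elsewhere in this package) clears the copy's body so the
+				// probe request carries no payload.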
+ rCopy := *r + removeRequestBody(&rCopy) + + resp, err := bacb.sender.Do(&rCopy) + if err == nil && resp.StatusCode == 401 { + defer resp.Body.Close() + if hasBearerChallenge(resp) { + bc, err := newBearerChallenge(resp) + if err != nil { + return r, err + } + if bacb.callback != nil { + ba, err := bacb.callback(bc.values[tenantID], bc.values["resource"]) + if err != nil { + return r, err + } + return Prepare(r, ba.WithAuthorization()) + } + } + } + } + return r, err + }) + } +} + +// returns true if the HTTP response contains a bearer challenge +func hasBearerChallenge(resp *http.Response) bool { + authHeader := resp.Header.Get(bearerChallengeHeader) + if len(authHeader) == 0 || strings.Index(authHeader, bearer) < 0 { + return false + } + return true +} + +type bearerChallenge struct { + values map[string]string +} + +func newBearerChallenge(resp *http.Response) (bc bearerChallenge, err error) { + challenge := strings.TrimSpace(resp.Header.Get(bearerChallengeHeader)) + trimmedChallenge := challenge[len(bearer)+1:] + + // challenge is a set of key=value pairs that are comma delimited + pairs := strings.Split(trimmedChallenge, ",") + if len(pairs) < 1 { + err = fmt.Errorf("challenge '%s' contains no pairs", challenge) + return bc, err + } + + bc.values = make(map[string]string) + for i := range pairs { + trimmedPair := strings.TrimSpace(pairs[i]) + pair := strings.Split(trimmedPair, "=") + if len(pair) == 2 { + // remove the enclosing quotes + key := strings.Trim(pair[0], "\"") + value := strings.Trim(pair[1], "\"") + + switch key { + case "authorization", "authorization_uri": + // strip the tenant ID from the authorization URL + asURL, err := url.Parse(value) + if err != nil { + return bc, err + } + bc.values[tenantID] = asURL.Path[1:] + default: + bc.values[key] = value + } + } + } + + return bc, err +} + +// EventGridKeyAuthorizer implements authorization for event grid using key authentication. +type EventGridKeyAuthorizer struct { + topicKey string +} + +// NewEventGridKeyAuthorizer creates a new EventGridKeyAuthorizer +// with the specified topic key. +func NewEventGridKeyAuthorizer(topicKey string) EventGridKeyAuthorizer { + return EventGridKeyAuthorizer{topicKey: topicKey} +} + +// WithAuthorization returns a PrepareDecorator that adds the aeg-sas-key authentication header. +func (egta EventGridKeyAuthorizer) WithAuthorization() PrepareDecorator { + headers := map[string]interface{}{ + "aeg-sas-key": egta.topicKey, + } + return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization() +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/autorest.go b/vendor/github.com/Azure/go-autorest/autorest/autorest.go index 51f1c4bb..aafdf021 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/autorest.go +++ b/vendor/github.com/Azure/go-autorest/autorest/autorest.go @@ -57,7 +57,22 @@ generated clients, see the Client described below. */ package autorest +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ import ( + "context" "net/http" "time" ) @@ -73,6 +88,9 @@ const ( // ResponseHasStatusCode returns true if the status code in the HTTP Response is in the passed set // and false otherwise. func ResponseHasStatusCode(resp *http.Response, codes ...int) bool { + if resp == nil { + return false + } return containsInt(codes, resp.StatusCode) } @@ -113,3 +131,20 @@ func NewPollingRequest(resp *http.Response, cancel <-chan struct{}) (*http.Reque return req, nil } + +// NewPollingRequestWithContext allocates and returns a new http.Request with the specified context to poll for the passed response. +func NewPollingRequestWithContext(ctx context.Context, resp *http.Response) (*http.Request, error) { + location := GetLocation(resp) + if location == "" { + return nil, NewErrorWithResponse("autorest", "NewPollingRequestWithContext", resp, "Location header missing from response that requires polling") + } + + req, err := Prepare((&http.Request{}).WithContext(ctx), + AsGet(), + WithBaseURL(location)) + if err != nil { + return nil, NewErrorWithError(err, "autorest", "NewPollingRequestWithContext", nil, "Failure creating poll request to %s", location) + } + + return req, nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go index 6e076981..a58e5ef3 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go @@ -1,7 +1,23 @@ package azure +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "bytes" + "context" + "encoding/json" "fmt" "io/ioutil" "net/http" @@ -17,18 +33,190 @@ const ( ) const ( - methodDelete = "DELETE" - methodPatch = "PATCH" - methodPost = "POST" - methodPut = "PUT" - methodGet = "GET" - operationInProgress string = "InProgress" operationCanceled string = "Canceled" operationFailed string = "Failed" operationSucceeded string = "Succeeded" ) +var pollingCodes = [...]int{http.StatusNoContent, http.StatusAccepted, http.StatusCreated, http.StatusOK} + +// Future provides a mechanism to access the status and results of an asynchronous request. +// Since futures are stateful they should be passed by value to avoid race conditions. +type Future struct { + req *http.Request + resp *http.Response + ps pollingState +} + +// NewFuture returns a new Future object initialized with the specified request. +func NewFuture(req *http.Request) Future { + return Future{req: req} +} + +// Response returns the last HTTP response or nil if there isn't one. +func (f Future) Response() *http.Response { + return f.resp +} + +// Status returns the last status message of the operation. +func (f Future) Status() string { + if f.ps.State == "" { + return "Unknown" + } + return f.ps.State +} + +// PollingMethod returns the method used to monitor the status of the asynchronous operation. 
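+// It reports PollingAsyncOperation when polling is driven by the Azure-AsyncOperation
+// header and PollingLocation when it is driven by the Location header (see the
+// PollingMethodType constants below).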
+func (f Future) PollingMethod() PollingMethodType { + return f.ps.PollingMethod +} + +// Done queries the service to see if the operation has completed. +func (f *Future) Done(sender autorest.Sender) (bool, error) { + // exit early if this future has terminated + if f.ps.hasTerminated() { + return true, f.errorInfo() + } + resp, err := sender.Do(f.req) + f.resp = resp + if err != nil { + return false, err + } + + if !autorest.ResponseHasStatusCode(resp, pollingCodes[:]...) { + // check response body for error content + if resp.Body != nil { + type respErr struct { + ServiceError ServiceError `json:"error"` + } + re := respErr{} + + defer resp.Body.Close() + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + return false, err + } + err = json.Unmarshal(b, &re) + if err != nil { + return false, err + } + return false, re.ServiceError + } + + // try to return something meaningful + return false, ServiceError{ + Code: fmt.Sprintf("%v", resp.StatusCode), + Message: resp.Status, + } + } + + err = updatePollingState(resp, &f.ps) + if err != nil { + return false, err + } + + if f.ps.hasTerminated() { + return true, f.errorInfo() + } + + f.req, err = newPollingRequest(f.ps) + return false, err +} + +// GetPollingDelay returns a duration the application should wait before checking +// the status of the asynchronous request and true; this value is returned from +// the service via the Retry-After response header. If the header wasn't returned +// then the function returns the zero-value time.Duration and false. +func (f Future) GetPollingDelay() (time.Duration, bool) { + if f.resp == nil { + return 0, false + } + + retry := f.resp.Header.Get(autorest.HeaderRetryAfter) + if retry == "" { + return 0, false + } + + d, err := time.ParseDuration(retry + "s") + if err != nil { + panic(err) + } + + return d, true +} + +// WaitForCompletion will return when one of the following conditions is met: the long +// running operation has completed, the provided context is cancelled, or the client's +// polling duration has been exceeded. It will retry failed polling attempts based on +// the retry value defined in the client up to the maximum retry attempts. +func (f Future) WaitForCompletion(ctx context.Context, client autorest.Client) error { + ctx, cancel := context.WithTimeout(ctx, client.PollingDuration) + defer cancel() + + done, err := f.Done(client) + for attempts := 0; !done; done, err = f.Done(client) { + if attempts >= client.RetryAttempts { + return autorest.NewErrorWithError(err, "azure", "WaitForCompletion", f.resp, "the number of retries has been exceeded") + } + // we want delayAttempt to be zero in the non-error case so + // that DelayForBackoff doesn't perform exponential back-off + var delayAttempt int + var delay time.Duration + if err == nil { + // check for Retry-After delay, if not present use the client's polling delay + var ok bool + delay, ok = f.GetPollingDelay() + if !ok { + delay = client.PollingDelay + } + } else { + // there was an error polling for status so perform exponential + // back-off based on the number of attempts using the client's retry + // duration. update attempts after delayAttempt to avoid off-by-one. 
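+			// e.g. with DelayForBackoff the first failed poll waits RetryDuration*2^0,
+			// the next RetryDuration*2^1, and so on, doubling per failed attempt.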
+ delayAttempt = attempts + delay = client.RetryDuration + attempts++ + } + // wait until the delay elapses or the context is cancelled + delayElapsed := autorest.DelayForBackoff(delay, delayAttempt, ctx.Done()) + if !delayElapsed { + return autorest.NewErrorWithError(ctx.Err(), "azure", "WaitForCompletion", f.resp, "context has been cancelled") + } + } + return err +} + +// if the operation failed the polling state will contain +// error information and implements the error interface +func (f *Future) errorInfo() error { + if !f.ps.hasSucceeded() { + return f.ps + } + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (f Future) MarshalJSON() ([]byte, error) { + return json.Marshal(&f.ps) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (f *Future) UnmarshalJSON(data []byte) error { + err := json.Unmarshal(data, &f.ps) + if err != nil { + return err + } + f.req, err = newPollingRequest(f.ps) + return err +} + +// PollingURL returns the URL used for retrieving the status of the long-running operation. +// For LROs that use the Location header the final URL value is used to retrieve the result. +func (f Future) PollingURL() string { + return f.ps.URI +} + // DoPollForAsynchronous returns a SendDecorator that polls if the http.Response is for an Azure // long-running operation. It will delay between requests for the duration specified in the // RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by @@ -40,8 +228,7 @@ func DoPollForAsynchronous(delay time.Duration) autorest.SendDecorator { if err != nil { return resp, err } - pollingCodes := []int{http.StatusAccepted, http.StatusCreated, http.StatusOK} - if !autorest.ResponseHasStatusCode(resp, pollingCodes...) { + if !autorest.ResponseHasStatusCode(resp, pollingCodes[:]...) { return resp, nil } @@ -58,10 +245,11 @@ func DoPollForAsynchronous(delay time.Duration) autorest.SendDecorator { break } - r, err = newPollingRequest(resp, ps) + r, err = newPollingRequest(ps) if err != nil { return resp, err } + r = r.WithContext(resp.Request.Context()) delay = autorest.GetRetryAfter(resp, delay) resp, err = autorest.SendWithSender(s, r, @@ -78,20 +266,15 @@ func getAsyncOperation(resp *http.Response) string { } func hasSucceeded(state string) bool { - return state == operationSucceeded + return strings.EqualFold(state, operationSucceeded) } func hasTerminated(state string) bool { - switch state { - case operationCanceled, operationFailed, operationSucceeded: - return true - default: - return false - } + return strings.EqualFold(state, operationCanceled) || strings.EqualFold(state, operationFailed) || strings.EqualFold(state, operationSucceeded) } func hasFailed(state string) bool { - return state == operationFailed + return strings.EqualFold(state, operationFailed) } type provisioningTracker interface { @@ -149,39 +332,50 @@ func (ps provisioningStatus) hasTerminated() bool { } func (ps provisioningStatus) hasProvisioningError() bool { - return ps.ProvisioningError != ServiceError{} + // code and message are required fields so only check them + return len(ps.ProvisioningError.Code) > 0 || + len(ps.ProvisioningError.Message) > 0 } -type pollingResponseFormat string +// PollingMethodType defines a type used for enumerating polling mechanisms. 
+type PollingMethodType string const ( - usesOperationResponse pollingResponseFormat = "OperationResponse" - usesProvisioningStatus pollingResponseFormat = "ProvisioningStatus" - formatIsUnknown pollingResponseFormat = "" + // PollingAsyncOperation indicates the polling method uses the Azure-AsyncOperation header. + PollingAsyncOperation PollingMethodType = "AsyncOperation" + + // PollingLocation indicates the polling method uses the Location header. + PollingLocation PollingMethodType = "Location" + + // PollingUnknown indicates an unknown polling method and is the default value. + PollingUnknown PollingMethodType = "" ) type pollingState struct { - responseFormat pollingResponseFormat - uri string - state string - code string - message string + PollingMethod PollingMethodType `json:"pollingMethod"` + URI string `json:"uri"` + State string `json:"state"` + ServiceError *ServiceError `json:"error,omitempty"` } func (ps pollingState) hasSucceeded() bool { - return hasSucceeded(ps.state) + return hasSucceeded(ps.State) } func (ps pollingState) hasTerminated() bool { - return hasTerminated(ps.state) + return hasTerminated(ps.State) } func (ps pollingState) hasFailed() bool { - return hasFailed(ps.state) + return hasFailed(ps.State) } func (ps pollingState) Error() string { - return fmt.Sprintf("Long running operation terminated with status '%s': Code=%q Message=%q", ps.state, ps.code, ps.message) + s := fmt.Sprintf("Long running operation terminated with status '%s'", ps.State) + if ps.ServiceError != nil { + s = fmt.Sprintf("%s: %+v", s, *ps.ServiceError) + } + return s } // updatePollingState maps the operation status -- retrieved from either a provisioningState @@ -196,7 +390,7 @@ func updatePollingState(resp *http.Response, ps *pollingState) error { // -- The first response will always be a provisioningStatus response; only the polling requests, // depending on the header returned, may be something otherwise. 
var pt provisioningTracker - if ps.responseFormat == usesOperationResponse { + if ps.PollingMethod == PollingAsyncOperation { pt = &operationResource{} } else { pt = &provisioningStatus{} @@ -204,30 +398,30 @@ func updatePollingState(resp *http.Response, ps *pollingState) error { // If this is the first request (that is, the polling response shape is unknown), determine how // to poll and what to expect - if ps.responseFormat == formatIsUnknown { + if ps.PollingMethod == PollingUnknown { req := resp.Request if req == nil { return autorest.NewError("azure", "updatePollingState", "Azure Polling Error - Original HTTP request is missing") } // Prefer the Azure-AsyncOperation header - ps.uri = getAsyncOperation(resp) - if ps.uri != "" { - ps.responseFormat = usesOperationResponse + ps.URI = getAsyncOperation(resp) + if ps.URI != "" { + ps.PollingMethod = PollingAsyncOperation } else { - ps.responseFormat = usesProvisioningStatus + ps.PollingMethod = PollingLocation } // Else, use the Location header - if ps.uri == "" { - ps.uri = autorest.GetLocation(resp) + if ps.URI == "" { + ps.URI = autorest.GetLocation(resp) } // Lastly, requests against an existing resource, use the last request URI - if ps.uri == "" { + if ps.URI == "" { m := strings.ToUpper(req.Method) - if m == methodPatch || m == methodPut || m == methodGet { - ps.uri = req.URL.String() + if m == http.MethodPatch || m == http.MethodPut || m == http.MethodGet { + ps.URI = req.URL.String() } } } @@ -248,23 +442,23 @@ func updatePollingState(resp *http.Response, ps *pollingState) error { // -- Unknown states are per-service inprogress states // -- Otherwise, infer state from HTTP status code if pt.hasTerminated() { - ps.state = pt.state() + ps.State = pt.state() } else if pt.state() != "" { - ps.state = operationInProgress + ps.State = operationInProgress } else { switch resp.StatusCode { case http.StatusAccepted: - ps.state = operationInProgress + ps.State = operationInProgress case http.StatusNoContent, http.StatusCreated, http.StatusOK: - ps.state = operationSucceeded + ps.State = operationSucceeded default: - ps.state = operationFailed + ps.State = operationFailed } } - if ps.state == operationInProgress && ps.uri == "" { + if strings.EqualFold(ps.State, operationInProgress) && ps.URI == "" { return autorest.NewError("azure", "updatePollingState", "Azure Polling Error - Unable to obtain polling URI for %s %s", resp.Request.Method, resp.Request.URL) } @@ -273,36 +467,45 @@ func updatePollingState(resp *http.Response, ps *pollingState) error { // -- Response // -- Otherwise, Unknown if ps.hasFailed() { - if ps.responseFormat == usesOperationResponse { - or := pt.(*operationResource) - ps.code = or.OperationError.Code - ps.message = or.OperationError.Message + if or, ok := pt.(*operationResource); ok { + ps.ServiceError = &or.OperationError + } else if p, ok := pt.(*provisioningStatus); ok && p.hasProvisioningError() { + ps.ServiceError = &p.ProvisioningError } else { - p := pt.(*provisioningStatus) - if p.hasProvisioningError() { - ps.code = p.ProvisioningError.Code - ps.message = p.ProvisioningError.Message - } else { - ps.code = "Unknown" - ps.message = "None" + ps.ServiceError = &ServiceError{ + Code: "Unknown", + Message: "None", } } } return nil } -func newPollingRequest(resp *http.Response, ps pollingState) (*http.Request, error) { - req := resp.Request - if req == nil { - return nil, autorest.NewError("azure", "newPollingRequest", "Azure Polling Error - Original HTTP request is missing") - } - - reqPoll, err := 
autorest.Prepare(&http.Request{Cancel: req.Cancel},
+func newPollingRequest(ps pollingState) (*http.Request, error) {
+	reqPoll, err := autorest.Prepare(&http.Request{},
 		autorest.AsGet(),
-		autorest.WithBaseURL(ps.uri))
+		autorest.WithBaseURL(ps.URI))
 	if err != nil {
-		return nil, autorest.NewErrorWithError(err, "azure", "newPollingRequest", nil, "Failure creating poll request to %s", ps.uri)
+		return nil, autorest.NewErrorWithError(err, "azure", "newPollingRequest", nil, "Failure creating poll request to %s", ps.URI)
 	}
 
 	return reqPoll, nil
 }
+
+// AsyncOpIncompleteError is the type that's returned from a future that has not completed.
+type AsyncOpIncompleteError struct {
+	// FutureType is the name of the type composed of an azure.Future.
+	FutureType string
+}
+
+// Error returns an error message including the originating type name of the error.
+func (e AsyncOpIncompleteError) Error() string {
+	return fmt.Sprintf("%s: asynchronous operation has not completed", e.FutureType)
+}
+
+// NewAsyncOpIncompleteError creates a new AsyncOpIncompleteError with the specified parameters.
+func NewAsyncOpIncompleteError(futureType string) AsyncOpIncompleteError {
+	return AsyncOpIncompleteError{
+		FutureType: futureType,
+	}
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go
index 3f4d1342..18d02952 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go
@@ -1,16 +1,29 @@
-/*
-Package azure provides Azure-specific implementations used with AutoRest.
-
-See the included examples for more detail.
-*/
+// Package azure provides Azure-specific implementations used with AutoRest.
+// See the included examples for more detail.
 package azure
 
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 import (
 	"encoding/json"
 	"fmt"
 	"io/ioutil"
 	"net/http"
+	"regexp"
 	"strconv"
+	"strings"
 
 	"github.com/Azure/go-autorest/autorest"
 )
@@ -29,21 +42,88 @@ const (
 )
 
 // ServiceError encapsulates the error response from an Azure service.
+// It adheres to the OData v4 specification for error responses.
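+// An illustrative (hand-written, not captured from a live service) body it can decode:
+//
+//	{"code": "ResourceNotFound", "message": "The resource was not found.", "target": "myResource"}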
 type ServiceError struct {
-	Code    string         `json:"code"`
-	Message string         `json:"message"`
-	Details *[]interface{} `json:"details"`
+	Code       string                   `json:"code"`
+	Message    string                   `json:"message"`
+	Target     *string                  `json:"target"`
+	Details    []map[string]interface{} `json:"details"`
+	InnerError map[string]interface{}   `json:"innererror"`
 }
 
 func (se ServiceError) Error() string {
-	if se.Details != nil {
-		d, err := json.Marshal(*(se.Details))
-		if err != nil {
-			return fmt.Sprintf("Code=%q Message=%q Details=%v", se.Code, se.Message, *se.Details)
-		}
-		return fmt.Sprintf("Code=%q Message=%q Details=%v", se.Code, se.Message, string(d))
+	result := fmt.Sprintf("Code=%q Message=%q", se.Code, se.Message)
+
+	if se.Target != nil {
+		result += fmt.Sprintf(" Target=%q", *se.Target)
 	}
-	return fmt.Sprintf("Code=%q Message=%q", se.Code, se.Message)
+
+	if se.Details != nil {
+		d, err := json.Marshal(se.Details)
+		if err != nil {
+			result += fmt.Sprintf(" Details=%v", se.Details)
+		} else {
+			result += fmt.Sprintf(" Details=%v", string(d))
+		}
+	}
+
+	if se.InnerError != nil {
+		d, err := json.Marshal(se.InnerError)
+		if err != nil {
+			result += fmt.Sprintf(" InnerError=%v", se.InnerError)
+		} else {
+			result += fmt.Sprintf(" InnerError=%v", string(d))
+		}
+	}
+
+	return result
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface for the ServiceError type.
+func (se *ServiceError) UnmarshalJSON(b []byte) error {
+	// per the OData v4 spec the details field must be an array of JSON objects.
+	// unfortunately not all services adhere to the spec and just return a single
+	// object instead of an array with one object. so we have to perform some
+	// shenanigans to accommodate both cases.
+	// http://docs.oasis-open.org/odata/odata-json-format/v4.0/os/odata-json-format-v4.0-os.html#_Toc372793091
+
+	type serviceError1 struct {
+		Code       string                   `json:"code"`
+		Message    string                   `json:"message"`
+		Target     *string                  `json:"target"`
+		Details    []map[string]interface{} `json:"details"`
+		InnerError map[string]interface{}   `json:"innererror"`
+	}
+
+	type serviceError2 struct {
+		Code       string                 `json:"code"`
+		Message    string                 `json:"message"`
+		Target     *string                `json:"target"`
+		Details    map[string]interface{} `json:"details"`
+		InnerError map[string]interface{} `json:"innererror"`
+	}
+
+	se1 := serviceError1{}
+	err := json.Unmarshal(b, &se1)
+	if err == nil {
+		se.populate(se1.Code, se1.Message, se1.Target, se1.Details, se1.InnerError)
+		return nil
+	}
+
+	se2 := serviceError2{}
+	err = json.Unmarshal(b, &se2)
+	if err == nil {
+		se.populate(se2.Code, se2.Message, se2.Target, nil, se2.InnerError)
+		se.Details = append(se.Details, se2.Details)
+		return nil
+	}
+	return err
+}
+
+func (se *ServiceError) populate(code, message string, target *string, details []map[string]interface{}, inner map[string]interface{}) {
+	se.Code = code
+	se.Message = message
+	se.Target = target
+	se.Details = details
+	se.InnerError = inner
 }
 
 // RequestError describes an error response returned by an Azure service.
@@ -69,6 +149,41 @@ func IsAzureError(e error) bool {
 	return ok
 }
 
+// Resource contains details about an Azure resource.
+type Resource struct {
+	SubscriptionID string
+	ResourceGroup  string
+	Provider       string
+	ResourceType   string
+	ResourceName   string
+}
+
+// ParseResourceID parses a resource ID into a Resource struct.
+// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-template-functions-resource#return-value-4.
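+// For example, an illustrative (not real) ID such as
+//
+//	/subscriptions/sub-id/resourceGroups/rg1/providers/Microsoft.Compute/virtualMachines/vm1
+//
+// parses to SubscriptionID "sub-id", ResourceGroup "rg1", Provider "Microsoft.Compute",
+// ResourceType "virtualMachines", and ResourceName "vm1".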
+func ParseResourceID(resourceID string) (Resource, error) { + + const resourceIDPatternText = `(?i)subscriptions/(.+)/resourceGroups/(.+)/providers/(.+?)/(.+?)/(.+)` + resourceIDPattern := regexp.MustCompile(resourceIDPatternText) + match := resourceIDPattern.FindStringSubmatch(resourceID) + + if len(match) == 0 { + return Resource{}, fmt.Errorf("parsing failed for %s. Invalid resource Id format", resourceID) + } + + v := strings.Split(match[5], "/") + resourceName := v[len(v)-1] + + result := Resource{ + SubscriptionID: match[1], + ResourceGroup: match[2], + Provider: match[3], + ResourceType: match[4], + ResourceName: resourceName, + } + + return result, nil +} + // NewErrorWithError creates a new Error conforming object from the // passed packageType, method, statusCode of the given resp (UndefinedStatusCode // if resp is nil), message, and original error. message is treated as a format @@ -165,7 +280,13 @@ func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator { if decodeErr != nil { return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), decodeErr) } else if e.ServiceError == nil { - e.ServiceError = &ServiceError{Code: "Unknown", Message: "Unknown service error"} + // Check if error is unwrapped ServiceError + if err := json.Unmarshal(b.Bytes(), &e.ServiceError); err != nil || e.ServiceError.Message == "" { + e.ServiceError = &ServiceError{ + Code: "Unknown", + Message: "Unknown service error", + } + } } e.RequestID = ExtractRequestID(resp) diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/config.go b/vendor/github.com/Azure/go-autorest/autorest/azure/config.go deleted file mode 100644 index bea30b0d..00000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/config.go +++ /dev/null @@ -1,13 +0,0 @@ -package azure - -import ( - "net/url" -) - -// OAuthConfig represents the endpoints needed -// in OAuth operations -type OAuthConfig struct { - AuthorizeEndpoint url.URL - TokenEndpoint url.URL - DeviceCodeEndpoint url.URL -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go index 4701b437..7e41f7fd 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go @@ -1,14 +1,30 @@ package azure +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( + "encoding/json" "fmt" - "net/url" + "io/ioutil" + "os" "strings" ) -const ( - activeDirectoryAPIVersion = "1.0" -) +// EnvironmentFilepathName captures the name of the environment variable containing the path to the file +// to be used while populating the Azure Environment. 
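+// EnvironmentFromName consults this variable when it is asked for "AZURESTACKCLOUD",
+// loading the environment via EnvironmentFromFile (see below).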
+const EnvironmentFilepathName = "AZURE_ENVIRONMENT_FILEPATH" var environments = map[string]Environment{ "AZURECHINACLOUD": ChinaCloud, @@ -28,6 +44,8 @@ type Environment struct { GalleryEndpoint string `json:"galleryEndpoint"` KeyVaultEndpoint string `json:"keyVaultEndpoint"` GraphEndpoint string `json:"graphEndpoint"` + ServiceBusEndpoint string `json:"serviceBusEndpoint"` + BatchManagementEndpoint string `json:"batchManagementEndpoint"` StorageEndpointSuffix string `json:"storageEndpointSuffix"` SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"` TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"` @@ -36,6 +54,7 @@ type Environment struct { ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"` ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"` ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"` + TokenAudience string `json:"tokenAudience"` } var ( @@ -50,14 +69,17 @@ var ( GalleryEndpoint: "https://gallery.azure.com/", KeyVaultEndpoint: "https://vault.azure.net/", GraphEndpoint: "https://graph.windows.net/", + ServiceBusEndpoint: "https://servicebus.windows.net/", + BatchManagementEndpoint: "https://batch.core.windows.net/", StorageEndpointSuffix: "core.windows.net", SQLDatabaseDNSSuffix: "database.windows.net", TrafficManagerDNSSuffix: "trafficmanager.net", KeyVaultDNSSuffix: "vault.azure.net", - ServiceBusEndpointSuffix: "servicebus.azure.com", + ServiceBusEndpointSuffix: "servicebus.windows.net", ServiceManagementVMDNSSuffix: "cloudapp.net", ResourceManagerVMDNSSuffix: "cloudapp.azure.com", ContainerRegistryDNSSuffix: "azurecr.io", + TokenAudience: "https://management.azure.com/", } // USGovernmentCloud is the cloud environment for the US Government @@ -67,10 +89,12 @@ var ( PublishSettingsURL: "https://manage.windowsazure.us/publishsettings/index", ServiceManagementEndpoint: "https://management.core.usgovcloudapi.net/", ResourceManagerEndpoint: "https://management.usgovcloudapi.net/", - ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.us/", GalleryEndpoint: "https://gallery.usgovcloudapi.net/", KeyVaultEndpoint: "https://vault.usgovcloudapi.net/", - GraphEndpoint: "https://graph.usgovcloudapi.net/", + GraphEndpoint: "https://graph.windows.net/", + ServiceBusEndpoint: "https://servicebus.usgovcloudapi.net/", + BatchManagementEndpoint: "https://batch.core.usgovcloudapi.net/", StorageEndpointSuffix: "core.usgovcloudapi.net", SQLDatabaseDNSSuffix: "database.usgovcloudapi.net", TrafficManagerDNSSuffix: "usgovtrafficmanager.net", @@ -79,6 +103,7 @@ var ( ServiceManagementVMDNSSuffix: "usgovcloudapp.net", ResourceManagerVMDNSSuffix: "cloudapp.windowsazure.us", ContainerRegistryDNSSuffix: "azurecr.io", + TokenAudience: "https://management.usgovcloudapi.net/", } // ChinaCloud is the cloud environment operated in China @@ -92,14 +117,17 @@ var ( GalleryEndpoint: "https://gallery.chinacloudapi.cn/", KeyVaultEndpoint: "https://vault.azure.cn/", GraphEndpoint: "https://graph.chinacloudapi.cn/", + ServiceBusEndpoint: "https://servicebus.chinacloudapi.cn/", + BatchManagementEndpoint: "https://batch.chinacloudapi.cn/", StorageEndpointSuffix: "core.chinacloudapi.cn", SQLDatabaseDNSSuffix: "database.chinacloudapi.cn", TrafficManagerDNSSuffix: "trafficmanager.cn", KeyVaultDNSSuffix: "vault.azure.cn", - ServiceBusEndpointSuffix: "servicebus.chinacloudapi.net", + ServiceBusEndpointSuffix: "servicebus.chinacloudapi.cn", ServiceManagementVMDNSSuffix: 
"chinacloudapp.cn", ResourceManagerVMDNSSuffix: "cloudapp.azure.cn", ContainerRegistryDNSSuffix: "azurecr.io", + TokenAudience: "https://management.chinacloudapi.cn/", } // GermanCloud is the cloud environment operated in Germany @@ -113,6 +141,8 @@ var ( GalleryEndpoint: "https://gallery.cloudapi.de/", KeyVaultEndpoint: "https://vault.microsoftazure.de/", GraphEndpoint: "https://graph.cloudapi.de/", + ServiceBusEndpoint: "https://servicebus.cloudapi.de/", + BatchManagementEndpoint: "https://batch.cloudapi.de/", StorageEndpointSuffix: "core.cloudapi.de", SQLDatabaseDNSSuffix: "database.cloudapi.de", TrafficManagerDNSSuffix: "azuretrafficmanager.de", @@ -121,47 +151,41 @@ var ( ServiceManagementVMDNSSuffix: "azurecloudapp.de", ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de", ContainerRegistryDNSSuffix: "azurecr.io", + TokenAudience: "https://management.microsoftazure.de/", } ) -// EnvironmentFromName returns an Environment based on the common name specified +// EnvironmentFromName returns an Environment based on the common name specified. func EnvironmentFromName(name string) (Environment, error) { + // IMPORTANT + // As per @radhikagupta5: + // This is technical debt, fundamentally here because Kubernetes is not currently accepting + // contributions to the providers. Once that is an option, the provider should be updated to + // directly call `EnvironmentFromFile`. Until then, we rely on dispatching Azure Stack environment creation + // from this method based on the name that is provided to us. + if strings.EqualFold(name, "AZURESTACKCLOUD") { + return EnvironmentFromFile(os.Getenv(EnvironmentFilepathName)) + } + name = strings.ToUpper(name) env, ok := environments[name] if !ok { return env, fmt.Errorf("autorest/azure: There is no cloud environment matching the name %q", name) } + return env, nil } -// OAuthConfigForTenant returns an OAuthConfig with tenant specific urls -func (env Environment) OAuthConfigForTenant(tenantID string) (*OAuthConfig, error) { - return OAuthConfigForTenant(env.ActiveDirectoryEndpoint, tenantID) -} - -// OAuthConfigForTenant returns an OAuthConfig with tenant specific urls for target cloud auth endpoint -func OAuthConfigForTenant(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) { - template := "%s/oauth2/%s?api-version=%s" - u, err := url.Parse(activeDirectoryEndpoint) +// EnvironmentFromFile loads an Environment from a configuration file available on disk. +// This function is particularly useful in the Hybrid Cloud model, where one must define their own +// endpoints. 
+func EnvironmentFromFile(location string) (unmarshaled Environment, err error) { + fileContents, err := ioutil.ReadFile(location) if err != nil { - return nil, err - } - authorizeURL, err := u.Parse(fmt.Sprintf(template, tenantID, "authorize", activeDirectoryAPIVersion)) - if err != nil { - return nil, err - } - tokenURL, err := u.Parse(fmt.Sprintf(template, tenantID, "token", activeDirectoryAPIVersion)) - if err != nil { - return nil, err - } - deviceCodeURL, err := u.Parse(fmt.Sprintf(template, tenantID, "devicecode", activeDirectoryAPIVersion)) - if err != nil { - return nil, err + return } - return &OAuthConfig{ - AuthorizeEndpoint: *authorizeURL, - TokenEndpoint: *tokenURL, - DeviceCodeEndpoint: *deviceCodeURL, - }, nil + err = json.Unmarshal(fileContents, &unmarshaled) + + return } diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go b/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go new file mode 100644 index 00000000..507f9e95 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go @@ -0,0 +1,245 @@ +package azure + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + + "github.com/Azure/go-autorest/autorest" +) + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +type audience []string + +type authentication struct { + LoginEndpoint string `json:"loginEndpoint"` + Audiences audience `json:"audiences"` +} + +type environmentMetadataInfo struct { + GalleryEndpoint string `json:"galleryEndpoint"` + GraphEndpoint string `json:"graphEndpoint"` + PortalEndpoint string `json:"portalEndpoint"` + Authentication authentication `json:"authentication"` +} + +// EnvironmentProperty represent property names that clients can override +type EnvironmentProperty string + +const ( + // EnvironmentName ... + EnvironmentName EnvironmentProperty = "name" + // EnvironmentManagementPortalURL .. + EnvironmentManagementPortalURL EnvironmentProperty = "managementPortalURL" + // EnvironmentPublishSettingsURL ... + EnvironmentPublishSettingsURL EnvironmentProperty = "publishSettingsURL" + // EnvironmentServiceManagementEndpoint ... + EnvironmentServiceManagementEndpoint EnvironmentProperty = "serviceManagementEndpoint" + // EnvironmentResourceManagerEndpoint ... + EnvironmentResourceManagerEndpoint EnvironmentProperty = "resourceManagerEndpoint" + // EnvironmentActiveDirectoryEndpoint ... + EnvironmentActiveDirectoryEndpoint EnvironmentProperty = "activeDirectoryEndpoint" + // EnvironmentGalleryEndpoint ... + EnvironmentGalleryEndpoint EnvironmentProperty = "galleryEndpoint" + // EnvironmentKeyVaultEndpoint ... + EnvironmentKeyVaultEndpoint EnvironmentProperty = "keyVaultEndpoint" + // EnvironmentGraphEndpoint ... + EnvironmentGraphEndpoint EnvironmentProperty = "graphEndpoint" + // EnvironmentServiceBusEndpoint ... + EnvironmentServiceBusEndpoint EnvironmentProperty = "serviceBusEndpoint" + // EnvironmentBatchManagementEndpoint ... 
+ EnvironmentBatchManagementEndpoint EnvironmentProperty = "batchManagementEndpoint" + // EnvironmentStorageEndpointSuffix ... + EnvironmentStorageEndpointSuffix EnvironmentProperty = "storageEndpointSuffix" + // EnvironmentSQLDatabaseDNSSuffix ... + EnvironmentSQLDatabaseDNSSuffix EnvironmentProperty = "sqlDatabaseDNSSuffix" + // EnvironmentTrafficManagerDNSSuffix ... + EnvironmentTrafficManagerDNSSuffix EnvironmentProperty = "trafficManagerDNSSuffix" + // EnvironmentKeyVaultDNSSuffix ... + EnvironmentKeyVaultDNSSuffix EnvironmentProperty = "keyVaultDNSSuffix" + // EnvironmentServiceBusEndpointSuffix ... + EnvironmentServiceBusEndpointSuffix EnvironmentProperty = "serviceBusEndpointSuffix" + // EnvironmentServiceManagementVMDNSSuffix ... + EnvironmentServiceManagementVMDNSSuffix EnvironmentProperty = "serviceManagementVMDNSSuffix" + // EnvironmentResourceManagerVMDNSSuffix ... + EnvironmentResourceManagerVMDNSSuffix EnvironmentProperty = "resourceManagerVMDNSSuffix" + // EnvironmentContainerRegistryDNSSuffix ... + EnvironmentContainerRegistryDNSSuffix EnvironmentProperty = "containerRegistryDNSSuffix" + // EnvironmentTokenAudience ... + EnvironmentTokenAudience EnvironmentProperty = "tokenAudience" +) + +// OverrideProperty represents property name and value that clients can override +type OverrideProperty struct { + Key EnvironmentProperty + Value string +} + +// EnvironmentFromURL loads an Environment from a URL +// This function is particularly useful in the Hybrid Cloud model, where one may define their own +// endpoints. +func EnvironmentFromURL(resourceManagerEndpoint string, properties ...OverrideProperty) (environment Environment, err error) { + var metadataEnvProperties environmentMetadataInfo + + if resourceManagerEndpoint == "" { + return environment, fmt.Errorf("Metadata resource manager endpoint is empty") + } + + if metadataEnvProperties, err = retrieveMetadataEnvironment(resourceManagerEndpoint); err != nil { + return environment, err + } + + // Give priority to user's override values + overrideProperties(&environment, properties) + + if environment.Name == "" { + environment.Name = "HybridEnvironment" + } + stampDNSSuffix := environment.StorageEndpointSuffix + if stampDNSSuffix == "" { + stampDNSSuffix = strings.TrimSuffix(strings.TrimPrefix(strings.Replace(resourceManagerEndpoint, strings.Split(resourceManagerEndpoint, ".")[0], "", 1), "."), "/") + environment.StorageEndpointSuffix = stampDNSSuffix + } + if environment.KeyVaultDNSSuffix == "" { + environment.KeyVaultDNSSuffix = fmt.Sprintf("%s.%s", "vault", stampDNSSuffix) + } + if environment.KeyVaultEndpoint == "" { + environment.KeyVaultEndpoint = fmt.Sprintf("%s%s", "https://", environment.KeyVaultDNSSuffix) + } + if environment.TokenAudience == "" { + environment.TokenAudience = metadataEnvProperties.Authentication.Audiences[0] + } + if environment.ActiveDirectoryEndpoint == "" { + environment.ActiveDirectoryEndpoint = metadataEnvProperties.Authentication.LoginEndpoint + } + if environment.ResourceManagerEndpoint == "" { + environment.ResourceManagerEndpoint = resourceManagerEndpoint + } + if environment.GalleryEndpoint == "" { + environment.GalleryEndpoint = metadataEnvProperties.GalleryEndpoint + } + if environment.GraphEndpoint == "" { + environment.GraphEndpoint = metadataEnvProperties.GraphEndpoint + } + + return environment, nil +} + +func overrideProperties(environment *Environment, properties []OverrideProperty) { + for _, property := range properties { + switch property.Key { + case EnvironmentName: + { + 
environment.Name = property.Value + } + case EnvironmentManagementPortalURL: + { + environment.ManagementPortalURL = property.Value + } + case EnvironmentPublishSettingsURL: + { + environment.PublishSettingsURL = property.Value + } + case EnvironmentServiceManagementEndpoint: + { + environment.ServiceManagementEndpoint = property.Value + } + case EnvironmentResourceManagerEndpoint: + { + environment.ResourceManagerEndpoint = property.Value + } + case EnvironmentActiveDirectoryEndpoint: + { + environment.ActiveDirectoryEndpoint = property.Value + } + case EnvironmentGalleryEndpoint: + { + environment.GalleryEndpoint = property.Value + } + case EnvironmentKeyVaultEndpoint: + { + environment.KeyVaultEndpoint = property.Value + } + case EnvironmentGraphEndpoint: + { + environment.GraphEndpoint = property.Value + } + case EnvironmentServiceBusEndpoint: + { + environment.ServiceBusEndpoint = property.Value + } + case EnvironmentBatchManagementEndpoint: + { + environment.BatchManagementEndpoint = property.Value + } + case EnvironmentStorageEndpointSuffix: + { + environment.StorageEndpointSuffix = property.Value + } + case EnvironmentSQLDatabaseDNSSuffix: + { + environment.SQLDatabaseDNSSuffix = property.Value + } + case EnvironmentTrafficManagerDNSSuffix: + { + environment.TrafficManagerDNSSuffix = property.Value + } + case EnvironmentKeyVaultDNSSuffix: + { + environment.KeyVaultDNSSuffix = property.Value + } + case EnvironmentServiceBusEndpointSuffix: + { + environment.ServiceBusEndpointSuffix = property.Value + } + case EnvironmentServiceManagementVMDNSSuffix: + { + environment.ServiceManagementVMDNSSuffix = property.Value + } + case EnvironmentResourceManagerVMDNSSuffix: + { + environment.ResourceManagerVMDNSSuffix = property.Value + } + case EnvironmentContainerRegistryDNSSuffix: + { + environment.ContainerRegistryDNSSuffix = property.Value + } + case EnvironmentTokenAudience: + { + environment.TokenAudience = property.Value + } + } + } +} + +func retrieveMetadataEnvironment(endpoint string) (environment environmentMetadataInfo, err error) { + client := autorest.NewClientWithUserAgent("") + managementEndpoint := fmt.Sprintf("%s%s", strings.TrimSuffix(endpoint, "/"), "/metadata/endpoints?api-version=1.0") + req, _ := http.NewRequest("GET", managementEndpoint, nil) + response, err := client.Do(req) + if err != nil { + return environment, err + } + defer response.Body.Close() + jsonResponse, err := ioutil.ReadAll(response.Body) + if err != nil { + return environment, err + } + err = json.Unmarshal(jsonResponse, &environment) + return environment, err +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go new file mode 100644 index 00000000..65ad0afc --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go @@ -0,0 +1,200 @@ +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
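A companion sketch for the metadata-driven flow above, outside the patch; the endpoint URL is a placeholder, so running this requires a reachable Azure Stack resource manager:

    package main

    import (
        "fmt"

        "github.com/Azure/go-autorest/autorest/azure"
    )

    func main() {
        env, err := azure.EnvironmentFromURL(
            "https://management.region.azurestack.example/",
            azure.OverrideProperty{Key: azure.EnvironmentName, Value: "MyStack"},
        )
        if err != nil {
            panic(err)
        }
        // Anything the override list and metadata endpoint did not supply
        // is derived from the resource manager URL itself.
        fmt.Println(env.Name, env.StorageEndpointSuffix, env.TokenAudience)
    }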
+
+package azure
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+
+	"github.com/Azure/go-autorest/autorest"
+)
+
+// DoRetryWithRegistration tries to register the resource provider in case it is unregistered.
+// It also handles request retries.
+func DoRetryWithRegistration(client autorest.Client) autorest.SendDecorator {
+	return func(s autorest.Sender) autorest.Sender {
+		return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
+			rr := autorest.NewRetriableRequest(r)
+			for currentAttempt := 0; currentAttempt < client.RetryAttempts; currentAttempt++ {
+				err = rr.Prepare()
+				if err != nil {
+					return resp, err
+				}
+
+				resp, err = autorest.SendWithSender(s, rr.Request(),
+					autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
+				)
+				if err != nil {
+					return resp, err
+				}
+
+				if resp.StatusCode != http.StatusConflict || client.SkipResourceProviderRegistration {
+					return resp, err
+				}
+				var re RequestError
+				err = autorest.Respond(
+					resp,
+					autorest.ByUnmarshallingJSON(&re),
+				)
+				if err != nil {
+					return resp, err
+				}
+				err = re
+
+				if re.ServiceError != nil && re.ServiceError.Code == "MissingSubscriptionRegistration" {
+					regErr := register(client, r, re)
+					if regErr != nil {
+						return resp, fmt.Errorf("failed auto registering Resource Provider: %s. Original error: %s", regErr, err)
+					}
+				}
+			}
+			return resp, fmt.Errorf("failed request: %s", err)
+		})
+	}
+}
+
+func getProvider(re RequestError) (string, error) {
+	if re.ServiceError != nil && len(re.ServiceError.Details) > 0 {
+		return re.ServiceError.Details[0]["target"].(string), nil
+	}
+	return "", errors.New("provider was not found in the response")
+}
+
+func register(client autorest.Client, originalReq *http.Request, re RequestError) error {
+	subID := getSubscription(originalReq.URL.Path)
+	if subID == "" {
+		return errors.New("missing parameter subscriptionID to register resource provider")
+	}
+	providerName, err := getProvider(re)
+	if err != nil {
+		return fmt.Errorf("missing parameter provider to register resource provider: %s", err)
+	}
+	newURL := url.URL{
+		Scheme: originalReq.URL.Scheme,
+		Host:   originalReq.URL.Host,
+	}
+
+	// taken from the resources SDK
+	// with almost identical code, these sections are easier to maintain
+	// it is also not a good idea to import the SDK here
+	// https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L252
+	pathParameters := map[string]interface{}{
+		"resourceProviderNamespace": autorest.Encode("path", providerName),
+		"subscriptionId":            autorest.Encode("path", subID),
+	}
+
+	const APIVersion = "2016-09-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsPost(),
+		autorest.WithBaseURL(newURL.String()),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register", pathParameters),
+		autorest.WithQueryParameters(queryParameters),
+	)
+
+	req, err := preparer.Prepare(&http.Request{})
+	if err != nil {
+		return err
+	}
+	req = req.WithContext(originalReq.Context())
+
+	resp, err := autorest.SendWithSender(client, req,
+		autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
+	)
+	if err != nil {
+		return err
+	}
+
+	type Provider struct {
+		RegistrationState *string `json:"registrationState,omitempty"`
+	}
+	var provider Provider
+
+ err = autorest.Respond( + resp, + WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&provider), + autorest.ByClosing(), + ) + if err != nil { + return err + } + + // poll for registered provisioning state + now := time.Now() + for err == nil && time.Since(now) < client.PollingDuration { + // taken from the resources SDK + // https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L45 + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(newURL.String()), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}", pathParameters), + autorest.WithQueryParameters(queryParameters), + ) + req, err = preparer.Prepare(&http.Request{}) + if err != nil { + return err + } + req = req.WithContext(originalReq.Context()) + + resp, err := autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), + ) + if err != nil { + return err + } + + err = autorest.Respond( + resp, + WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&provider), + autorest.ByClosing(), + ) + if err != nil { + return err + } + + if provider.RegistrationState != nil && + *provider.RegistrationState == "Registered" { + break + } + + delayed := autorest.DelayWithRetryAfter(resp, originalReq.Context().Done()) + if !delayed && !autorest.DelayForBackoff(client.PollingDelay, 0, originalReq.Context().Done()) { + return originalReq.Context().Err() + } + } + if !(time.Since(now) < client.PollingDuration) { + return errors.New("polling for resource provider registration has exceeded the polling duration") + } + return err +} + +func getSubscription(path string) string { + parts := strings.Split(path, "/") + for i, v := range parts { + if v == "subscriptions" && (i+1) < len(parts) { + return parts[i+1] + } + } + return "" +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/token.go b/vendor/github.com/Azure/go-autorest/autorest/azure/token.go deleted file mode 100644 index cfcd0301..00000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/token.go +++ /dev/null @@ -1,363 +0,0 @@ -package azure - -import ( - "crypto/rand" - "crypto/rsa" - "crypto/sha1" - "crypto/x509" - "encoding/base64" - "fmt" - "net/http" - "net/url" - "strconv" - "time" - - "github.com/Azure/go-autorest/autorest" - "github.com/dgrijalva/jwt-go" -) - -const ( - defaultRefresh = 5 * time.Minute - tokenBaseDate = "1970-01-01T00:00:00Z" - - // OAuthGrantTypeDeviceCode is the "grant_type" identifier used in device flow - OAuthGrantTypeDeviceCode = "device_code" - - // OAuthGrantTypeClientCredentials is the "grant_type" identifier used in credential flows - OAuthGrantTypeClientCredentials = "client_credentials" - - // OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows - OAuthGrantTypeRefreshToken = "refresh_token" -) - -var expirationBase time.Time - -func init() { - expirationBase, _ = time.Parse(time.RFC3339, tokenBaseDate) -} - -// TokenRefreshCallback is the type representing callbacks that will be called after -// a successful token refresh -type TokenRefreshCallback func(Token) error - -// Token encapsulates the access token used to authorize Azure requests. 
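A sketch of how the decorator above is meant to be attached, outside the patch; the URL and API version are placeholders:

    package main

    import (
        "net/http"

        "github.com/Azure/go-autorest/autorest"
        "github.com/Azure/go-autorest/autorest/azure"
    )

    func main() {
        client := autorest.NewClientWithUserAgent("example-agent")
        req, _ := http.NewRequest(http.MethodPut,
            "https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000"+
                "/resourceGroups/example-rg/providers/Microsoft.Compute/virtualMachines/vm0"+
                "?api-version=2016-04-30-preview", nil)

        // On a 409 with code MissingSubscriptionRegistration the decorator registers
        // Microsoft.Compute for the subscription, polls until Registered, then retries.
        resp, err := autorest.SendWithSender(client, req, azure.DoRetryWithRegistration(client))
        _, _ = resp, err
    }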
-type Token struct { - AccessToken string `json:"access_token"` - RefreshToken string `json:"refresh_token"` - - ExpiresIn string `json:"expires_in"` - ExpiresOn string `json:"expires_on"` - NotBefore string `json:"not_before"` - - Resource string `json:"resource"` - Type string `json:"token_type"` -} - -// Expires returns the time.Time when the Token expires. -func (t Token) Expires() time.Time { - s, err := strconv.Atoi(t.ExpiresOn) - if err != nil { - s = -3600 - } - return expirationBase.Add(time.Duration(s) * time.Second).UTC() -} - -// IsExpired returns true if the Token is expired, false otherwise. -func (t Token) IsExpired() bool { - return t.WillExpireIn(0) -} - -// WillExpireIn returns true if the Token will expire after the passed time.Duration interval -// from now, false otherwise. -func (t Token) WillExpireIn(d time.Duration) bool { - return !t.Expires().After(time.Now().Add(d)) -} - -// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose -// value is "Bearer " followed by the AccessToken of the Token. -func (t *Token) WithAuthorization() autorest.PrepareDecorator { - return func(p autorest.Preparer) autorest.Preparer { - return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { - return (autorest.WithBearerAuthorization(t.AccessToken)(p)).Prepare(r) - }) - } -} - -// ServicePrincipalNoSecret represents a secret type that contains no secret -// meaning it is not valid for fetching a fresh token. This is used by Manual -type ServicePrincipalNoSecret struct { -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret -// It only returns an error for the ServicePrincipalNoSecret type -func (noSecret *ServicePrincipalNoSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - return fmt.Errorf("Manually created ServicePrincipalToken does not contain secret material to retrieve a new access token") -} - -// ServicePrincipalSecret is an interface that allows various secret mechanism to fill the form -// that is submitted when acquiring an oAuth token. -type ServicePrincipalSecret interface { - SetAuthenticationValues(spt *ServicePrincipalToken, values *url.Values) error -} - -// ServicePrincipalTokenSecret implements ServicePrincipalSecret for client_secret type authorization. -type ServicePrincipalTokenSecret struct { - ClientSecret string -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -// It will populate the form submitted during oAuth Token Acquisition using the client_secret. -func (tokenSecret *ServicePrincipalTokenSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - v.Set("client_secret", tokenSecret.ClientSecret) - return nil -} - -// ServicePrincipalCertificateSecret implements ServicePrincipalSecret for generic RSA cert auth with signed JWTs. -type ServicePrincipalCertificateSecret struct { - Certificate *x509.Certificate - PrivateKey *rsa.PrivateKey -} - -// SignJwt returns the JWT signed with the certificate's private key. -func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalToken) (string, error) { - hasher := sha1.New() - _, err := hasher.Write(secret.Certificate.Raw) - if err != nil { - return "", err - } - - thumbprint := base64.URLEncoding.EncodeToString(hasher.Sum(nil)) - - // The jti (JWT ID) claim provides a unique identifier for the JWT. 
- jti := make([]byte, 20) - _, err = rand.Read(jti) - if err != nil { - return "", err - } - - token := jwt.New(jwt.SigningMethodRS256) - token.Header["x5t"] = thumbprint - token.Claims = jwt.MapClaims{ - "aud": spt.oauthConfig.TokenEndpoint.String(), - "iss": spt.clientID, - "sub": spt.clientID, - "jti": base64.URLEncoding.EncodeToString(jti), - "nbf": time.Now().Unix(), - "exp": time.Now().Add(time.Hour * 24).Unix(), - } - - signedString, err := token.SignedString(secret.PrivateKey) - return signedString, err -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -// It will populate the form submitted during oAuth Token Acquisition using a JWT signed with a certificate. -func (secret *ServicePrincipalCertificateSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - jwt, err := secret.SignJwt(spt) - if err != nil { - return err - } - - v.Set("client_assertion", jwt) - v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer") - return nil -} - -// ServicePrincipalToken encapsulates a Token created for a Service Principal. -type ServicePrincipalToken struct { - Token - - secret ServicePrincipalSecret - oauthConfig OAuthConfig - clientID string - resource string - autoRefresh bool - refreshWithin time.Duration - sender autorest.Sender - - refreshCallbacks []TokenRefreshCallback -} - -// NewServicePrincipalTokenWithSecret create a ServicePrincipalToken using the supplied ServicePrincipalSecret implementation. -func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, resource string, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - spt := &ServicePrincipalToken{ - oauthConfig: oauthConfig, - secret: secret, - clientID: id, - resource: resource, - autoRefresh: true, - refreshWithin: defaultRefresh, - sender: &http.Client{}, - refreshCallbacks: callbacks, - } - return spt, nil -} - -// NewServicePrincipalTokenFromManualToken creates a ServicePrincipalToken using the supplied token -func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID string, resource string, token Token, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - spt, err := NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - &ServicePrincipalNoSecret{}, - callbacks...) - if err != nil { - return nil, err - } - - spt.Token = token - - return spt, nil -} - -// NewServicePrincipalToken creates a ServicePrincipalToken from the supplied Service Principal -// credentials scoped to the named resource. -func NewServicePrincipalToken(oauthConfig OAuthConfig, clientID string, secret string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - return NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - &ServicePrincipalTokenSecret{ - ClientSecret: secret, - }, - callbacks..., - ) -} - -// NewServicePrincipalTokenFromCertificate create a ServicePrincipalToken from the supplied pkcs12 bytes. 
-func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - return NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - &ServicePrincipalCertificateSecret{ - PrivateKey: privateKey, - Certificate: certificate, - }, - callbacks..., - ) -} - -// EnsureFresh will refresh the token if it will expire within the refresh window (as set by -// RefreshWithin). -func (spt *ServicePrincipalToken) EnsureFresh() error { - if spt.WillExpireIn(spt.refreshWithin) { - return spt.Refresh() - } - return nil -} - -// InvokeRefreshCallbacks calls any TokenRefreshCallbacks that were added to the SPT during initialization -func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error { - if spt.refreshCallbacks != nil { - for _, callback := range spt.refreshCallbacks { - err := callback(spt.Token) - if err != nil { - return autorest.NewErrorWithError(err, - "azure.ServicePrincipalToken", "InvokeRefreshCallbacks", nil, "A TokenRefreshCallback handler returned an error") - } - } - } - return nil -} - -// Refresh obtains a fresh token for the Service Principal. -func (spt *ServicePrincipalToken) Refresh() error { - return spt.refreshInternal(spt.resource) -} - -// RefreshExchange refreshes the token, but for a different resource. -func (spt *ServicePrincipalToken) RefreshExchange(resource string) error { - return spt.refreshInternal(resource) -} - -func (spt *ServicePrincipalToken) refreshInternal(resource string) error { - v := url.Values{} - v.Set("client_id", spt.clientID) - v.Set("resource", resource) - - if spt.RefreshToken != "" { - v.Set("grant_type", OAuthGrantTypeRefreshToken) - v.Set("refresh_token", spt.RefreshToken) - } else { - v.Set("grant_type", OAuthGrantTypeClientCredentials) - err := spt.secret.SetAuthenticationValues(spt, &v) - if err != nil { - return err - } - } - - req, _ := autorest.Prepare(&http.Request{}, - autorest.AsPost(), - autorest.AsFormURLEncoded(), - autorest.WithBaseURL(spt.oauthConfig.TokenEndpoint.String()), - autorest.WithFormData(v)) - - resp, err := autorest.SendWithSender(spt.sender, req) - if err != nil { - return autorest.NewErrorWithError(err, - "azure.ServicePrincipalToken", "Refresh", resp, "Failure sending request for Service Principal %s", - spt.clientID) - } - - var newToken Token - err = autorest.Respond(resp, - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&newToken), - autorest.ByClosing()) - if err != nil { - return autorest.NewErrorWithError(err, - "azure.ServicePrincipalToken", "Refresh", resp, "Failure handling response to Service Principal %s request", - spt.clientID) - } - - spt.Token = newToken - - err = spt.InvokeRefreshCallbacks(newToken) - if err != nil { - // its already wrapped inside InvokeRefreshCallbacks - return err - } - - return nil -} - -// SetAutoRefresh enables or disables automatic refreshing of stale tokens. -func (spt *ServicePrincipalToken) SetAutoRefresh(autoRefresh bool) { - spt.autoRefresh = autoRefresh -} - -// SetRefreshWithin sets the interval within which if the token will expire, EnsureFresh will -// refresh the token. -func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) { - spt.refreshWithin = d - return -} - -// SetSender sets the autorest.Sender used when obtaining the Service Principal token. An -// undecorated http.Client is used by default. 
-func (spt *ServicePrincipalToken) SetSender(s autorest.Sender) { - spt.sender = s -} - -// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose -// value is "Bearer " followed by the AccessToken of the ServicePrincipalToken. -// -// By default, the token will automatically refresh if nearly expired (as determined by the -// RefreshWithin interval). Use the AutoRefresh method to enable or disable automatically refreshing -// tokens. -func (spt *ServicePrincipalToken) WithAuthorization() autorest.PrepareDecorator { - return func(p autorest.Preparer) autorest.Preparer { - return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { - if spt.autoRefresh { - err := spt.EnsureFresh() - if err != nil { - return r, autorest.NewErrorWithError(err, - "azure.ServicePrincipalToken", "WithAuthorization", nil, "Failed to refresh Service Principal Token for request to %s", - r.URL) - } - } - return (autorest.WithBearerAuthorization(spt.AccessToken)(p)).Prepare(r) - }) - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/client.go b/vendor/github.com/Azure/go-autorest/autorest/client.go index b5f94b5c..4e92dcad 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/client.go +++ b/vendor/github.com/Azure/go-autorest/autorest/client.go @@ -1,5 +1,19 @@ package autorest +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "bytes" "fmt" @@ -21,6 +35,9 @@ const ( // DefaultRetryAttempts is number of attempts for retry status codes (5xx). DefaultRetryAttempts = 3 + + // DefaultRetryDuration is the duration to wait between retries. + DefaultRetryDuration = 30 * time.Second ) var ( @@ -33,8 +50,10 @@ var ( Version(), ) - statusCodesForRetry = []int{ + // StatusCodesForRetry are a defined group of status code for which the client will retry + StatusCodesForRetry = []int{ http.StatusRequestTimeout, // 408 + http.StatusTooManyRequests, // 429 http.StatusInternalServerError, // 500 http.StatusBadGateway, // 502 http.StatusServiceUnavailable, // 503 @@ -147,6 +166,9 @@ type Client struct { UserAgent string Jar http.CookieJar + + // Set to true to skip attempted registration of resource providers (false by default). 
+ SkipResourceProviderRegistration bool } // NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed @@ -156,9 +178,10 @@ func NewClientWithUserAgent(ua string) Client { PollingDelay: DefaultPollingDelay, PollingDuration: DefaultPollingDuration, RetryAttempts: DefaultRetryAttempts, - RetryDuration: 30 * time.Second, + RetryDuration: DefaultRetryDuration, UserAgent: defaultUserAgent, } + c.Sender = c.sender() c.AddToUserAgent(ua) return c } @@ -180,16 +203,22 @@ func (c Client) Do(r *http.Request) (*http.Response, error) { r, _ = Prepare(r, WithUserAgent(c.UserAgent)) } + // NOTE: c.WithInspection() must be last in the list so that it can inspect all preceding operations r, err := Prepare(r, - c.WithInspection(), - c.WithAuthorization()) + c.WithAuthorization(), + c.WithInspection()) if err != nil { - return nil, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed") + var resp *http.Response + if detErr, ok := err.(DetailedError); ok { + // if the authorization failed (e.g. invalid credentials) there will + // be a response associated with the error, be sure to return it. + resp = detErr.Response + } + return resp, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed") } - resp, err := SendWithSender(c.sender(), r, - DoRetryForStatusCodes(c.RetryAttempts, c.RetryDuration, statusCodesForRetry...)) - Respond(resp, - c.ByInspecting()) + + resp, err := SendWithSender(c.sender(), r) + Respond(resp, c.ByInspecting()) return resp, err } diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/date.go b/vendor/github.com/Azure/go-autorest/autorest/date/date.go index 80ca60e9..c4571065 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/date/date.go +++ b/vendor/github.com/Azure/go-autorest/autorest/date/date.go @@ -5,6 +5,20 @@ time.Time types. And both convert to time.Time through a ToTime method. */ package date +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "fmt" "time" diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/time.go b/vendor/github.com/Azure/go-autorest/autorest/date/time.go index c1af6296..b453fad0 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/date/time.go +++ b/vendor/github.com/Azure/go-autorest/autorest/date/time.go @@ -1,5 +1,19 @@ package date +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
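For reference, a sketch of the client knobs introduced above; the values are illustrative, not recommendations:

    package main

    import (
        "time"

        "github.com/Azure/go-autorest/autorest"
    )

    func main() {
        c := autorest.NewClientWithUserAgent("example-agent")
        c.RetryAttempts = 5                       // retried on StatusCodesForRetry (408, 429, 5xx)
        c.RetryDuration = 10 * time.Second        // base backoff (default is DefaultRetryDuration, 30s)
        c.SkipResourceProviderRegistration = true // opt out of auto provider registration on 409
        _ = c
    }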
+
 import (
 	"regexp"
 	"time"
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go
index 11995fb9..48fb39ba 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go
@@ -1,5 +1,19 @@
 package date
 
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 import (
 	"errors"
 	"time"
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go
new file mode 100644
index 00000000..7073959b
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go
@@ -0,0 +1,123 @@
+package date
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"encoding/binary"
+	"encoding/json"
+	"time"
+)
+
+// unixEpoch is the moment in time that should be treated as timestamp 0.
+var unixEpoch = time.Date(1970, time.January, 1, 0, 0, 0, 0, time.UTC)
+
+// UnixTime marshals and unmarshals a time that is represented as the number
+// of seconds (ignoring leap seconds) since the Unix Epoch.
+type UnixTime time.Time
+
+// Duration returns the time as a Duration since the UnixEpoch.
+func (t UnixTime) Duration() time.Duration {
+	return time.Time(t).Sub(unixEpoch)
+}
+
+// NewUnixTimeFromSeconds creates a UnixTime as a number of seconds from the UnixEpoch.
+func NewUnixTimeFromSeconds(seconds float64) UnixTime {
+	return NewUnixTimeFromDuration(time.Duration(seconds * float64(time.Second)))
+}
+
+// NewUnixTimeFromNanoseconds creates a UnixTime as a number of nanoseconds from the UnixEpoch.
+func NewUnixTimeFromNanoseconds(nanoseconds int64) UnixTime {
+	return NewUnixTimeFromDuration(time.Duration(nanoseconds))
+}
+
+// NewUnixTimeFromDuration creates a UnixTime as a duration of time since the UnixEpoch.
+func NewUnixTimeFromDuration(dur time.Duration) UnixTime {
+	return UnixTime(unixEpoch.Add(dur))
+}
+
+// UnixEpoch retrieves the moment considered the Unix Epoch, i.e. the time represented by '0'.
+func UnixEpoch() time.Time {
+	return unixEpoch
+}
+
+// MarshalJSON preserves the UnixTime as a JSON number conforming to Unix Timestamp requirements.
+// (i.e. the number of seconds since midnight January 1st, 1970 not considering leap seconds.)
+func (t UnixTime) MarshalJSON() ([]byte, error) {
+	buffer := &bytes.Buffer{}
+	enc := json.NewEncoder(buffer)
+	err := enc.Encode(float64(time.Time(t).UnixNano()) / 1e9)
+	if err != nil {
+		return nil, err
+	}
+	return buffer.Bytes(), nil
+}
+
+// UnmarshalJSON reconstitutes a UnixTime saved as a JSON number of seconds since
+// midnight January 1st, 1970.
+func (t *UnixTime) UnmarshalJSON(text []byte) error {
+	dec := json.NewDecoder(bytes.NewReader(text))
+
+	var secondsSinceEpoch float64
+	if err := dec.Decode(&secondsSinceEpoch); err != nil {
+		return err
+	}
+
+	*t = NewUnixTimeFromSeconds(secondsSinceEpoch)
+
+	return nil
+}
+
+// MarshalText writes the UnixTime in the same textual form as the underlying
+// time.Time (RFC 3339).
+func (t UnixTime) MarshalText() ([]byte, error) {
+	cast := time.Time(t)
+	return cast.MarshalText()
+}
+
+// UnmarshalText populates a UnixTime from the same textual form as the underlying
+// time.Time (RFC 3339).
+func (t *UnixTime) UnmarshalText(raw []byte) error {
+	var unmarshaled time.Time
+
+	if err := unmarshaled.UnmarshalText(raw); err != nil {
+		return err
+	}
+
+	*t = UnixTime(unmarshaled)
+	return nil
+}
+
+// MarshalBinary converts a UnixTime into a binary.LittleEndian int64 of nanoseconds since the epoch.
+func (t UnixTime) MarshalBinary() ([]byte, error) {
+	buf := &bytes.Buffer{}
+
+	payload := int64(t.Duration())
+
+	if err := binary.Write(buf, binary.LittleEndian, &payload); err != nil {
+		return nil, err
+	}
+
+	return buf.Bytes(), nil
+}
+
+// UnmarshalBinary converts from a binary.LittleEndian int64 of nanoseconds since the epoch into a UnixTime.
+func (t *UnixTime) UnmarshalBinary(raw []byte) error {
+	var nanosecondsSinceEpoch int64
+
+	if err := binary.Read(bytes.NewReader(raw), binary.LittleEndian, &nanosecondsSinceEpoch); err != nil {
+		return err
+	}
+	*t = NewUnixTimeFromNanoseconds(nanosecondsSinceEpoch)
+	return nil
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/utility.go b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go
index 207b1a24..12addf0e 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/date/utility.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go
@@ -1,5 +1,19 @@
 package date
 
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 import (
 	"strings"
 	"time"
diff --git a/vendor/github.com/Azure/go-autorest/autorest/error.go b/vendor/github.com/Azure/go-autorest/autorest/error.go
index 4bcb8f27..f724f333 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/error.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/error.go
@@ -1,5 +1,19 @@
 package autorest
 
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
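A round-trip sketch for the new UnixTime type above, outside the patch:

    package main

    import (
        "encoding/json"
        "fmt"
        "time"

        "github.com/Azure/go-autorest/autorest/date"
    )

    func main() {
        t := date.NewUnixTimeFromSeconds(1500000000)

        b, _ := json.Marshal(t)
        fmt.Println(string(b)) // a JSON number of seconds, e.g. 1.5e+09

        var parsed date.UnixTime
        _ = json.Unmarshal([]byte(`1500000000`), &parsed)
        fmt.Println(time.Time(parsed).UTC()) // 2017-07-14 02:40:00 +0000 UTC
    }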
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "fmt" "net/http" @@ -31,6 +45,9 @@ type DetailedError struct { // Service Error is the response body of failed API in bytes ServiceError []byte + + // Response is the response object that was returned during failure if applicable. + Response *http.Response } // NewError creates a new Error conforming object from the passed packageType, method, and @@ -67,6 +84,7 @@ func NewErrorWithError(original error, packageType string, method string, resp * Method: method, StatusCode: statusCode, Message: fmt.Sprintf(message, args...), + Response: resp, } } diff --git a/vendor/github.com/Azure/go-autorest/autorest/preparer.go b/vendor/github.com/Azure/go-autorest/autorest/preparer.go index c9deb261..6d67bd73 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/preparer.go +++ b/vendor/github.com/Azure/go-autorest/autorest/preparer.go @@ -1,5 +1,19 @@ package autorest +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "bytes" "encoding/json" @@ -13,8 +27,9 @@ import ( ) const ( - mimeTypeJSON = "application/json" - mimeTypeFormPost = "application/x-www-form-urlencoded" + mimeTypeJSON = "application/json" + mimeTypeOctetStream = "application/octet-stream" + mimeTypeFormPost = "application/x-www-form-urlencoded" headerAuthorization = "Authorization" headerContentType = "Content-Type" @@ -98,6 +113,28 @@ func WithHeader(header string, value string) PrepareDecorator { } } +// WithHeaders returns a PrepareDecorator that sets the specified HTTP headers of the http.Request to +// the passed value. It canonicalizes the passed headers name (via http.CanonicalHeaderKey) before +// adding them. +func WithHeaders(headers map[string]interface{}) PrepareDecorator { + h := ensureValueStrings(headers) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.Header == nil { + r.Header = make(http.Header) + } + + for name, value := range h { + r.Header.Set(http.CanonicalHeaderKey(name), value) + } + } + return r, err + }) + } +} + // WithBearerAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose // value is "Bearer " followed by the supplied token. func WithBearerAuthorization(token string) PrepareDecorator { @@ -128,6 +165,11 @@ func AsJSON() PrepareDecorator { return AsContentType(mimeTypeJSON) } +// AsOctetStream returns a PrepareDecorator that adds the "application/octet-stream" Content-Type header. 
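A sketch of composing the new preparers above, outside the patch; the URL and header values are placeholders:

    package main

    import (
        "fmt"
        "net/http"

        "github.com/Azure/go-autorest/autorest"
    )

    func main() {
        req, err := autorest.Prepare(&http.Request{},
            autorest.AsPut(),
            autorest.AsOctetStream(),
            autorest.WithBaseURL("https://example.com/upload"),
            autorest.WithHeaders(map[string]interface{}{
                "x-ms-client-request-id": "00000000-0000-0000-0000-000000000000",
                "accept-language":        "en-US",
            }),
        )
        if err != nil {
            panic(err)
        }
        // PUT https://example.com/upload application/octet-stream
        fmt.Println(req.Method, req.URL, req.Header.Get("Content-Type"))
    }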
+func AsOctetStream() PrepareDecorator { + return AsContentType(mimeTypeOctetStream) +} + // WithMethod returns a PrepareDecorator that sets the HTTP method of the passed request. The // decorator does not validate that the passed method string is a known HTTP method. func WithMethod(method string) PrepareDecorator { @@ -201,6 +243,11 @@ func WithFormData(v url.Values) PrepareDecorator { r, err := p.Prepare(r) if err == nil { s := v.Encode() + + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set(http.CanonicalHeaderKey(headerContentType), mimeTypeFormPost) r.ContentLength = int64(len(s)) r.Body = ioutil.NopCloser(strings.NewReader(s)) } @@ -416,28 +463,18 @@ func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorato if r.URL == nil { return r, NewError("autorest", "WithQueryParameters", "Invoked with a nil URL") } + v := r.URL.Query() for key, value := range parameters { - v.Add(key, value) + d, err := url.QueryUnescape(value) + if err != nil { + return r, err + } + v.Add(key, d) } - r.URL.RawQuery = createQuery(v) + r.URL.RawQuery = v.Encode() } return r, err }) } } - -// Authorizer is the interface that provides a PrepareDecorator used to supply request -// authorization. Most often, the Authorizer decorator runs last so it has access to the full -// state of the formed HTTP request. -type Authorizer interface { - WithAuthorization() PrepareDecorator -} - -// NullAuthorizer implements a default, "do nothing" Authorizer. -type NullAuthorizer struct{} - -// WithAuthorization returns a PrepareDecorator that does nothing. -func (na NullAuthorizer) WithAuthorization() PrepareDecorator { - return WithNothing() -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/responder.go b/vendor/github.com/Azure/go-autorest/autorest/responder.go index 87f71e58..a908a0ad 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/responder.go +++ b/vendor/github.com/Azure/go-autorest/autorest/responder.go @@ -1,5 +1,19 @@ package autorest +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "bytes" "encoding/json" diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go new file mode 100644 index 00000000..fa11dbed --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go @@ -0,0 +1,52 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "io" + "io/ioutil" + "net/http" +) + +// NewRetriableRequest returns a wrapper around an HTTP request that support retry logic. +func NewRetriableRequest(req *http.Request) *RetriableRequest { + return &RetriableRequest{req: req} +} + +// Request returns the wrapped HTTP request. +func (rr *RetriableRequest) Request() *http.Request { + return rr.req +} + +func (rr *RetriableRequest) prepareFromByteReader() (err error) { + // fall back to making a copy (only do this once) + b := []byte{} + if rr.req.ContentLength > 0 { + b = make([]byte, rr.req.ContentLength) + _, err = io.ReadFull(rr.req.Body, b) + if err != nil { + return err + } + } else { + b, err = ioutil.ReadAll(rr.req.Body) + if err != nil { + return err + } + } + rr.br = bytes.NewReader(b) + rr.req.Body = ioutil.NopCloser(rr.br) + return err +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go new file mode 100644 index 00000000..7143cc61 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go @@ -0,0 +1,54 @@ +// +build !go1.8 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package autorest + +import ( + "bytes" + "io/ioutil" + "net/http" +) + +// RetriableRequest provides facilities for retrying an HTTP request. +type RetriableRequest struct { + req *http.Request + br *bytes.Reader +} + +// Prepare signals that the request is about to be sent. +func (rr *RetriableRequest) Prepare() (err error) { + // preserve the request body; this is to support retry logic as + // the underlying transport will always close the reqeust body + if rr.req.Body != nil { + if rr.br != nil { + _, err = rr.br.Seek(0, 0 /*io.SeekStart*/) + rr.req.Body = ioutil.NopCloser(rr.br) + } + if err != nil { + return err + } + if rr.br == nil { + // fall back to making a copy (only do this once) + err = rr.prepareFromByteReader() + } + } + return err +} + +func removeRequestBody(req *http.Request) { + req.Body = nil + req.ContentLength = 0 +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go new file mode 100644 index 00000000..ae15c6bf --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go @@ -0,0 +1,66 @@ +// +build go1.8 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package autorest
+
+import (
+ "bytes"
+ "io"
+ "io/ioutil"
+ "net/http"
+)
+
+// RetriableRequest provides facilities for retrying an HTTP request.
+type RetriableRequest struct {
+ req *http.Request
+ rc io.ReadCloser
+ br *bytes.Reader
+}
+
+// Prepare signals that the request is about to be sent.
+func (rr *RetriableRequest) Prepare() (err error) {
+ // preserve the request body; this is to support retry logic as
+ // the underlying transport will always close the request body
+ if rr.req.Body != nil {
+ if rr.rc != nil {
+ rr.req.Body = rr.rc
+ } else if rr.br != nil {
+ _, err = rr.br.Seek(0, io.SeekStart)
+ rr.req.Body = ioutil.NopCloser(rr.br)
+ }
+ if err != nil {
+ return err
+ }
+ if rr.req.GetBody != nil {
+ // this will allow us to preserve the body without having to
+ // make a copy. note we need to do this on each iteration
+ rr.rc, err = rr.req.GetBody()
+ if err != nil {
+ return err
+ }
+ } else if rr.br == nil {
+ // fall back to making a copy (only do this once)
+ err = rr.prepareFromByteReader()
+ }
+ }
+ return err
+}
+
+func removeRequestBody(req *http.Request) {
+ req.Body = nil
+ req.GetBody = nil
+ req.ContentLength = 0
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender.go b/vendor/github.com/Azure/go-autorest/autorest/sender.go
index 9c069781..cacbd815 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/sender.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/sender.go
@@ -1,12 +1,25 @@
 package autorest
 
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 import (
- "bytes"
 "fmt"
- "io/ioutil"
 "log"
 "math"
 "net/http"
+ "strconv"
 "time"
 )
 
@@ -73,7 +86,7 @@ func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*ht
 func AfterDelay(d time.Duration) SendDecorator {
 return func(s Sender) Sender {
 return SenderFunc(func(r *http.Request) (*http.Response, error) {
- if !DelayForBackoff(d, 0, r.Cancel) {
+ if !DelayForBackoff(d, 0, r.Context().Done()) {
 return nil, fmt.Errorf("autorest: AfterDelay canceled before full delay")
 }
 return s.Do(r)
@@ -152,7 +165,7 @@ func DoPollForStatusCodes(duration time.Duration, delay time.Duration, codes ...
 resp, err = s.Do(r)
 
 if err == nil && ResponseHasStatusCode(resp, codes...) {
- r, err = NewPollingRequest(resp, r.Cancel)
+ r, err = NewPollingRequestWithContext(r.Context(), resp)
 
 for err == nil && ResponseHasStatusCode(resp, codes...) {
 Respond(resp,
@@ -175,12 +188,19 @@
func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator { return func(s Sender) Sender { return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + rr := NewRetriableRequest(r) for attempt := 0; attempt < attempts; attempt++ { - resp, err = s.Do(r) + err = rr.Prepare() + if err != nil { + return resp, err + } + resp, err = s.Do(rr.Request()) if err == nil { return resp, err } - DelayForBackoff(backoff, attempt, r.Cancel) + if !DelayForBackoff(backoff, attempt, r.Context().Done()) { + return nil, r.Context().Err() + } } return resp, err }) @@ -194,29 +214,57 @@ func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator { func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator { return func(s Sender) Sender { return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - b := []byte{} - if r.Body != nil { - b, err = ioutil.ReadAll(r.Body) + rr := NewRetriableRequest(r) + // Increment to add the first call (attempts denotes number of retries) + attempts++ + for attempt := 0; attempt < attempts; { + err = rr.Prepare() if err != nil { return resp, err } - } - - // Increment to add the first call (attempts denotes number of retries) - attempts++ - for attempt := 0; attempt < attempts; attempt++ { - r.Body = ioutil.NopCloser(bytes.NewBuffer(b)) - resp, err = s.Do(r) - if err != nil || !ResponseHasStatusCode(resp, codes...) { + resp, err = s.Do(rr.Request()) + // if the error isn't temporary don't bother retrying + if err != nil && !IsTemporaryNetworkError(err) { + return nil, err + } + // we want to retry if err is not nil (e.g. transient network failure). note that for failed authentication + // resp and err will both have a value, so in this case we don't want to retry as it will never succeed. + if err == nil && !ResponseHasStatusCode(resp, codes...) || IsTokenRefreshError(err) { return resp, err } - DelayForBackoff(backoff, attempt, r.Cancel) + delayed := DelayWithRetryAfter(resp, r.Context().Done()) + if !delayed && !DelayForBackoff(backoff, attempt, r.Context().Done()) { + return nil, r.Context().Err() + } + // don't count a 429 against the number of attempts + // so that we continue to retry until it succeeds + if resp == nil || resp.StatusCode != http.StatusTooManyRequests { + attempt++ + } } return resp, err }) } } +// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header in +// responses with status code 429 +func DelayWithRetryAfter(resp *http.Response, cancel <-chan struct{}) bool { + if resp == nil { + return false + } + retryAfter, _ := strconv.Atoi(resp.Header.Get("Retry-After")) + if resp.StatusCode == http.StatusTooManyRequests && retryAfter > 0 { + select { + case <-time.After(time.Duration(retryAfter) * time.Second): + return true + case <-cancel: + return false + } + } + return false +} + // DoRetryForDuration returns a SendDecorator that retries the request until the total time is equal // to or greater than the specified duration, exponentially backing off between requests using the // supplied backoff time.Duration (which may be zero). 
Retrying may be canceled by closing the
@@ -224,13 +272,20 @@ func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) Se
 func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator {
 return func(s Sender) Sender {
 return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
+ rr := NewRetriableRequest(r)
 end := time.Now().Add(d)
 for attempt := 0; time.Now().Before(end); attempt++ {
- resp, err = s.Do(r)
+ err = rr.Prepare()
+ if err != nil {
+ return resp, err
+ }
+ resp, err = s.Do(rr.Request())
 if err == nil {
 return resp, err
 }
- DelayForBackoff(backoff, attempt, r.Cancel)
+ if !DelayForBackoff(backoff, attempt, r.Context().Done()) {
+ return nil, r.Context().Err()
+ }
 }
 return resp, err
 })
diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility.go b/vendor/github.com/Azure/go-autorest/autorest/utility.go
index 78067148..f3a42bfc 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/utility.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/utility.go
@@ -1,15 +1,32 @@
 package autorest
 
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 import (
 "bytes"
 "encoding/json"
 "encoding/xml"
 "fmt"
 "io"
+ "net"
+ "net/http"
 "net/url"
 "reflect"
- "sort"
 "strings"
+
+ "github.com/Azure/go-autorest/autorest/adal"
 )
 
 // EncodedAs is a series of constants specifying various data encodings
@@ -123,13 +140,38 @@ func MapToValues(m map[string]interface{}) url.Values {
 return v
 }
 
-// String method converts interface v to string. If interface is a list, it
-// joins list elements using separator.
-func String(v interface{}, sep ...string) string {
- if len(sep) > 0 {
- return ensureValueString(strings.Join(v.([]string), sep[0]))
+// AsStringSlice method converts an interface{} to []string. It expects the
+// parameter to be a slice or array of a type whose underlying type is string.
+func AsStringSlice(s interface{}) ([]string, error) {
+ v := reflect.ValueOf(s)
+ if v.Kind() != reflect.Slice && v.Kind() != reflect.Array {
+ return nil, NewError("autorest", "AsStringSlice", "the value's type is not an array.")
 }
- return ensureValueString(v)
+ stringSlice := make([]string, 0, v.Len())
+
+ for i := 0; i < v.Len(); i++ {
+ stringSlice = append(stringSlice, v.Index(i).String())
+ }
+ return stringSlice, nil
+}
+
+// String method converts interface v to string. If interface is a list, it
+// joins list elements using the separator. Note that only sep[0] will be used for
+// joining if any separator is specified.
+func String(v interface{}, sep ...string) string {
+ if len(sep) == 0 {
+ return ensureValueString(v)
+ }
+ stringSlice, ok := v.([]string)
+ if ok == false {
+ var err error
+ stringSlice, err = AsStringSlice(v)
+ if err != nil {
+ panic(fmt.Sprintf("autorest: Couldn't convert value to a string %s.", err))
+ }
+ }
+ return ensureValueString(strings.Join(stringSlice, sep[0]))
 }
 
 // Encode method encodes url path and query parameters.
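[Editor's note: a minimal sketch of how the reworked `String`/`AsStringSlice` helpers above behave. The import path is the vendored package shown in this diff; the printed values are assumptions derived from the code, not output captured from the patch.]

```go
package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	// With no separator, the value is rendered directly.
	fmt.Println(autorest.String(42)) // 42

	// With a separator, slices and arrays of string-like types are
	// joined; only sep[0] is consulted if several are passed.
	fmt.Println(autorest.String([]string{"a", "b", "c"}, ",")) // a,b,c
}
```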
@@ -153,26 +195,33 @@ func queryEscape(s string) string { return url.QueryEscape(s) } -// This method is same as Encode() method of "net/url" go package, -// except it does not encode the query parameters because they -// already come encoded. It formats values map in query format (bar=foo&a=b). -func createQuery(v url.Values) string { - var buf bytes.Buffer - keys := make([]string, 0, len(v)) - for k := range v { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - vs := v[k] - prefix := url.QueryEscape(k) + "=" - for _, v := range vs { - if buf.Len() > 0 { - buf.WriteByte('&') - } - buf.WriteString(prefix) - buf.WriteString(v) - } - } - return buf.String() +// ChangeToGet turns the specified http.Request into a GET (it assumes it wasn't). +// This is mainly useful for long-running operations that use the Azure-AsyncOperation +// header, so we change the initial PUT into a GET to retrieve the final result. +func ChangeToGet(req *http.Request) *http.Request { + req.Method = "GET" + req.Body = nil + req.ContentLength = 0 + req.Header.Del("Content-Length") + return req +} + +// IsTokenRefreshError returns true if the specified error implements the TokenRefreshError +// interface. If err is a DetailedError it will walk the chain of Original errors. +func IsTokenRefreshError(err error) bool { + if _, ok := err.(adal.TokenRefreshError); ok { + return true + } + if de, ok := err.(DetailedError); ok { + return IsTokenRefreshError(de.Original) + } + return false +} + +// IsTemporaryNetworkError returns true if the specified error is a temporary network error. +func IsTemporaryNetworkError(err error) bool { + if netErr, ok := err.(net.Error); ok && netErr.Temporary() { + return true + } + return false } diff --git a/vendor/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/Azure/go-autorest/autorest/version.go index 7a0bf9c9..d265055f 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/version.go +++ b/vendor/github.com/Azure/go-autorest/autorest/version.go @@ -1,23 +1,20 @@ package autorest -import ( - "fmt" -) - -const ( - major = "7" - minor = "3" - patch = "0" - tag = "" - semVerFormat = "%s.%s.%s%s" -) - -var version string +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. // Version returns the semantic version (see http://semver.org). 
func Version() string {
- if version == "" {
- version = fmt.Sprintf(semVerFormat, major, minor, patch, tag)
- }
- return version
+ return "v10.8.1"
 }
diff --git a/vendor/github.com/marstr/guid/LICENSE.txt b/vendor/github.com/marstr/guid/LICENSE.txt
new file mode 100644
index 00000000..e18a0841
--- /dev/null
+++ b/vendor/github.com/marstr/guid/LICENSE.txt
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2016 Martin Strobel
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/marstr/guid/README.md b/vendor/github.com/marstr/guid/README.md
new file mode 100644
index 00000000..355fad16
--- /dev/null
+++ b/vendor/github.com/marstr/guid/README.md
@@ -0,0 +1,27 @@
+[![Build Status](https://travis-ci.org/marstr/guid.svg?branch=master)](https://travis-ci.org/marstr/guid)
+[![GoDoc](https://godoc.org/github.com/marstr/guid?status.svg)](https://godoc.org/github.com/marstr/guid)
+[![Go Report Card](https://goreportcard.com/badge/github.com/marstr/guid)](https://goreportcard.com/report/github.com/marstr/guid)
+
+# Guid
+Globally unique identifiers offer a quick means of generating non-colliding values across a distributed system. For this implementation, [RFC 4122](http://ietf.org/rfc/rfc4122.txt) governs the desired behavior.
+
+## What's in a name?
+You have likely already noticed that the RFC and some implementations refer to these structures as UUIDs (Universally Unique Identifiers), whereas this project is annotated as GUIDs (Globally Unique Identifiers). The name Guid was selected to make clear this project's ties to the [.NET struct Guid.](https://msdn.microsoft.com/en-us/library/system.guid(v=vs.110).aspx) The most obvious relationship is the desire to have the same format specifiers available in this library's Format and Parse methods as .NET would have in its ToString and Parse methods.
+
+# Installation
+- Ensure you have the [Go Programming Language](https://golang.org/) installed on your system.
+- Run the command: `go get -u github.com/marstr/guid`
+
+# Contribution
+Contributions are welcome! Feel free to send Pull Requests. Continuous Integration will ensure that you have conformed to Go conventions. Please remember to add tests for your changes.
+
+# Versioning
+This library will adhere to the
+[Semantic Versioning 2.0.0](http://semver.org/spec/v2.0.0.html) specification. It may be worth noting this should allow for tools like [glide](https://glide.readthedocs.io/en/latest/) to pull in this library with ease.
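[Editor's note: the vendored README above has no usage example, so here is a quick sketch built only from the identifiers in the guid.go source later in this patch (NewGUID, String, Stringf, FormatB, Parse, Version). It is illustrative and not part of the upstream README.]

```go
package main

import (
	"fmt"

	"github.com/marstr/guid"
)

func main() {
	g := guid.NewGUID() // version 4, pseudo-random

	fmt.Println(g.String())              // default "D" format
	fmt.Println(g.Stringf(guid.FormatB)) // braced "B" format

	// Parse accepts any of the known formats.
	parsed, err := guid.Parse(g.String())
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed.Version()) // 4
}
```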
+
+The Release Notes portion of this file will be updated to reflect the most recent major/minor updates, with the option to tag particular bug-fixes as well. Updates to the Release Notes for patches should be additive, whereas major/minor updates should replace the previous version. If one desires to see the release notes for an older version, check out that version of the code and open this file.
+
+# Release Notes 1.1.*
+
+## v1.1.0
+Adding support for JSON marshaling and unmarshaling.
diff --git a/vendor/github.com/marstr/guid/guid.go b/vendor/github.com/marstr/guid/guid.go
new file mode 100644
index 00000000..51b038b7
--- /dev/null
+++ b/vendor/github.com/marstr/guid/guid.go
@@ -0,0 +1,301 @@
+package guid
+
+import (
+ "bytes"
+ "crypto/rand"
+ "errors"
+ "fmt"
+ "net"
+ "strings"
+ "sync"
+ "time"
+)
+
+// GUID is a unique identifier designed to virtually guarantee non-conflict between values generated
+// across a distributed system.
+type GUID struct {
+ timeHighAndVersion uint16
+ timeMid uint16
+ timeLow uint32
+ clockSeqHighAndReserved uint8
+ clockSeqLow uint8
+ node [6]byte
+}
+
+// Format enumerates the values that are supported by Parse and Format
+type Format string
+
+// These constants define the possible string formats available via this implementation of Guid.
+const (
+ FormatB Format = "B" // {00000000-0000-0000-0000-000000000000}
+ FormatD Format = "D" // 00000000-0000-0000-0000-000000000000
+ FormatN Format = "N" // 00000000000000000000000000000000
+ FormatP Format = "P" // (00000000-0000-0000-0000-000000000000)
+ FormatX Format = "X" // {0x00000000,0x0000,0x0000,{0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00}}
+ FormatDefault Format = FormatD
+)
+
+// CreationStrategy enumerates the values that are supported for populating the bits of a new Guid.
+type CreationStrategy string
+
+// These constants define the possible creation strategies available via this implementation of Guid.
+const (
+ CreationStrategyVersion1 CreationStrategy = "version1"
+ CreationStrategyVersion2 CreationStrategy = "version2"
+ CreationStrategyVersion3 CreationStrategy = "version3"
+ CreationStrategyVersion4 CreationStrategy = "version4"
+ CreationStrategyVersion5 CreationStrategy = "version5"
+)
+
+var emptyGUID GUID
+
+// NewGUID generates and returns a new globally unique identifier
+func NewGUID() GUID {
+ result, err := version4()
+ if err != nil {
+ panic(err) //Version 4 (pseudo-random GUID) doesn't use anything that could fail.
+ }
+ return result
+}
+
+var knownStrategies = map[CreationStrategy]func() (GUID, error){
+ CreationStrategyVersion1: version1,
+ CreationStrategyVersion4: version4,
+}
+
+// NewGUIDs generates and returns a new globally unique identifier that conforms to the given strategy.
+func NewGUIDs(strategy CreationStrategy) (GUID, error) {
+ if creator, present := knownStrategies[strategy]; present {
+ result, err := creator()
+ return result, err
+ }
+ return emptyGUID, errors.New("Unsupported CreationStrategy")
+}
+
+// Empty returns a copy of the default and empty GUID.
+func Empty() GUID {
+ return emptyGUID
+}
+
+var knownFormats = map[Format]string{
+ FormatN: "%08x%04x%04x%02x%02x%02x%02x%02x%02x%02x%02x",
+ FormatD: "%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x",
+ FormatB: "{%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x}",
+ FormatP: "(%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x)",
+ FormatX: "{0x%08x,0x%04x,0x%04x,{0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x}}",
+}
+
+// MarshalJSON writes a GUID as a JSON string.
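+// The emitted JSON value is the default ("D") format wrapped in double quotes.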
+func (guid GUID) MarshalJSON() (marshaled []byte, err error) { + buf := bytes.Buffer{} + + _, err = buf.WriteRune('"') + buf.WriteString(guid.String()) + buf.WriteRune('"') + + marshaled = buf.Bytes() + return +} + +// Parse instantiates a GUID from a text representation of the same GUID. +// This is the inverse of function family String() +func Parse(value string) (GUID, error) { + var guid GUID + for _, fullFormat := range knownFormats { + parity, err := fmt.Sscanf( + value, + fullFormat, + &guid.timeLow, + &guid.timeMid, + &guid.timeHighAndVersion, + &guid.clockSeqHighAndReserved, + &guid.clockSeqLow, + &guid.node[0], + &guid.node[1], + &guid.node[2], + &guid.node[3], + &guid.node[4], + &guid.node[5]) + if parity == 11 && err == nil { + return guid, err + } + } + return emptyGUID, fmt.Errorf("\"%s\" is not in a recognized format", value) +} + +// String returns a text representation of a GUID in the default format. +func (guid GUID) String() string { + return guid.Stringf(FormatDefault) +} + +// Stringf returns a text representation of a GUID that conforms to the specified format. +// If an unrecognized format is provided, the empty string is returned. +func (guid GUID) Stringf(format Format) string { + if format == "" { + format = FormatDefault + } + fullFormat, present := knownFormats[format] + if !present { + return "" + } + return fmt.Sprintf( + fullFormat, + guid.timeLow, + guid.timeMid, + guid.timeHighAndVersion, + guid.clockSeqHighAndReserved, + guid.clockSeqLow, + guid.node[0], + guid.node[1], + guid.node[2], + guid.node[3], + guid.node[4], + guid.node[5]) +} + +// UnmarshalJSON parses a GUID from a JSON string token. +func (guid *GUID) UnmarshalJSON(marshaled []byte) (err error) { + if len(marshaled) < 2 { + err = errors.New("JSON GUID must be surrounded by quotes") + return + } + stripped := marshaled[1 : len(marshaled)-1] + *guid, err = Parse(string(stripped)) + return +} + +// Version reads a GUID to parse which mechanism of generating GUIDS was employed. +// Values returned here are documented in rfc4122.txt. 
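+// For example, GUIDs produced by NewGUID are generated via version 4, so Version returns 4 for them.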
+func (guid GUID) Version() uint {
+ return uint(guid.timeHighAndVersion >> 12)
+}
+
+var unixToGregorianOffset = time.Date(1970, 01, 01, 0, 0, 00, 0, time.UTC).Sub(time.Date(1582, 10, 15, 0, 0, 0, 0, time.UTC))
+
+// getRFC4122Time returns a 60-bit count of 100-nanosecond intervals since 00:00:00.00 October 15th, 1582
+func getRFC4122Time() int64 {
+ currentTime := time.Now().UTC().Add(unixToGregorianOffset).UnixNano()
+ currentTime /= 100
+ return currentTime & 0x0FFFFFFFFFFFFFFF
+}
+
+var clockSeqVal uint16
+var clockSeqKey sync.Mutex
+
+func getClockSequence() (uint16, error) {
+ clockSeqKey.Lock()
+ defer clockSeqKey.Unlock()
+
+ if 0 == clockSeqVal {
+ var temp [2]byte
+ if parity, err := rand.Read(temp[:]); !(2 == parity && nil == err) {
+ return 0, err
+ }
+ clockSeqVal = uint16(temp[0])<<8 | uint16(temp[1])
+ }
+ clockSeqVal++
+ return clockSeqVal, nil
+}
+
+func getMACAddress() (mac [6]byte, err error) {
+ var hostNICs []net.Interface
+
+ hostNICs, err = net.Interfaces()
+ if err != nil {
+ return
+ }
+
+ for _, nic := range hostNICs {
+ var parity int
+
+ parity, err = fmt.Sscanf(
+ strings.ToLower(nic.HardwareAddr.String()),
+ "%02x:%02x:%02x:%02x:%02x:%02x",
+ &mac[0],
+ &mac[1],
+ &mac[2],
+ &mac[3],
+ &mac[4],
+ &mac[5])
+
+ if parity == len(mac) {
+ return
+ }
+ }
+
+ err = fmt.Errorf("No suitable address found")
+
+ return
+}
+
+func version1() (result GUID, err error) {
+ var localMAC [6]byte
+ var clockSeq uint16
+
+ currentTime := getRFC4122Time()
+
+ result.timeLow = uint32(currentTime)
+ result.timeMid = uint16(currentTime >> 32)
+ result.timeHighAndVersion = uint16(currentTime >> 48)
+ if err = result.setVersion(1); err != nil {
+ return emptyGUID, err
+ }
+
+ if localMAC, err = getMACAddress(); nil != err {
+ if parity, err := rand.Read(localMAC[:]); !(len(localMAC) == parity && err == nil) {
+ return emptyGUID, err
+ }
+ localMAC[0] |= 0x1
+ }
+ copy(result.node[:], localMAC[:])
+
+ if clockSeq, err = getClockSequence(); nil != err {
+ return emptyGUID, err
+ }
+
+ result.clockSeqLow = uint8(clockSeq)
+ result.clockSeqHighAndReserved = uint8(clockSeq >> 8)
+
+ result.setReservedBits()
+
+ return
+}
+
+func version4() (GUID, error) {
+ var retval GUID
+ var bits [10]byte
+
+ if parity, err := rand.Read(bits[:]); !(len(bits) == parity && err == nil) {
+ return emptyGUID, err
+ }
+ retval.timeHighAndVersion |= uint16(bits[0]) | uint16(bits[1])<<8
+ retval.timeMid |= uint16(bits[2]) | uint16(bits[3])<<8
+ retval.timeLow |= uint32(bits[4]) | uint32(bits[5])<<8 | uint32(bits[6])<<16 | uint32(bits[7])<<24
+ retval.clockSeqHighAndReserved = uint8(bits[8])
+ retval.clockSeqLow = uint8(bits[9])
+
+ //Randomly set clock-sequence, reserved, and node
+ if written, err := rand.Read(retval.node[:]); !(nil == err && written == len(retval.node)) {
+ retval = emptyGUID
+ return retval, err
+ }
+
+ if err := retval.setVersion(4); nil != err {
+ return emptyGUID, err
+ }
+ retval.setReservedBits()
+
+ return retval, nil
+}
+
+func (guid *GUID) setVersion(version uint16) error {
+ if version > 5 || version == 0 {
+ return fmt.Errorf("While setting GUID version, unsupported version: %d", version)
+ }
+ guid.timeHighAndVersion = (guid.timeHighAndVersion & 0x0fff) | version<<12
+ return nil
+}
+
+func (guid *GUID) setReservedBits() {
+ guid.clockSeqHighAndReserved = (guid.clockSeqHighAndReserved & 0x3f) | 0x80
+}
diff --git a/vendor/github.com/satori/go.uuid/LICENSE b/vendor/github.com/satori/go.uuid/LICENSE
new file mode 100644
index 00000000..926d5498
--- /dev/null
+++ b/vendor/github.com/satori/go.uuid/LICENSE
@@ -0,0 +1,20 @@
+Copyright (C) 2013-2018 by Maxim Bublis
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/satori/go.uuid/README.md b/vendor/github.com/satori/go.uuid/README.md
new file mode 100644
index 00000000..7b1a722d
--- /dev/null
+++ b/vendor/github.com/satori/go.uuid/README.md
@@ -0,0 +1,65 @@
+# UUID package for Go language
+
+[![Build Status](https://travis-ci.org/satori/go.uuid.png?branch=master)](https://travis-ci.org/satori/go.uuid)
+[![Coverage Status](https://coveralls.io/repos/github/satori/go.uuid/badge.svg?branch=master)](https://coveralls.io/github/satori/go.uuid)
+[![GoDoc](http://godoc.org/github.com/satori/go.uuid?status.png)](http://godoc.org/github.com/satori/go.uuid)
+
+This package provides a pure Go implementation of Universally Unique Identifiers (UUID). Both creation and parsing of UUIDs are supported.
+
+With 100% test coverage and benchmarks out of the box.
+
+Supported versions:
+* Version 1, based on timestamp and MAC address (RFC 4122)
+* Version 2, based on timestamp, MAC address and POSIX UID/GID (DCE 1.1)
+* Version 3, based on MD5 hashing (RFC 4122)
+* Version 4, based on random numbers (RFC 4122)
+* Version 5, based on SHA-1 hashing (RFC 4122)
+
+## Installation
+
+Use the `go` command:
+
+	$ go get github.com/satori/go.uuid
+
+## Requirements
+
+UUID package requires Go >= 1.2.
+
+## Example
+
+```go
+package main
+
+import (
+	"fmt"
+	"github.com/satori/go.uuid"
+)
+
+func main() {
+	// Creating UUID Version 4
+	u1 := uuid.NewV4()
+	fmt.Printf("UUIDv4: %s\n", u1)
+
+	// Parsing UUID from string input
+	u2, err := uuid.FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+	if err != nil {
+		fmt.Printf("Something went wrong: %s", err)
+	}
+	fmt.Printf("Successfully parsed: %s", u2)
+}
+```
+
+## Documentation
+
+[Documentation](http://godoc.org/github.com/satori/go.uuid) is hosted at GoDoc project.
+
+## Links
+* [RFC 4122](http://tools.ietf.org/html/rfc4122)
+* [DCE 1.1: Authentication and Security Services](http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01)
+
+## Copyright
+
+Copyright (C) 2013-2018 by Maxim Bublis.
+
+UUID package released under MIT License.
+See [LICENSE](https://github.com/satori/go.uuid/blob/master/LICENSE) for details.
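[Editor's note: the README example above covers V4 creation and parsing; the sketch below additionally shows a deterministic, name-based (version 5) UUID using only identifiers from the vendored source that follows. Illustrative only, not part of the upstream package.]

```go
package main

import (
	"fmt"

	uuid "github.com/satori/go.uuid"
)

func main() {
	// Name-based UUIDs are deterministic: the same namespace and name
	// always produce the same UUID (version 5 uses SHA-1).
	u := uuid.NewV5(uuid.NamespaceDNS, "example.com")
	fmt.Printf("UUIDv5: %s (version %d)\n", u, u.Version())

	// Random UUIDs carry the RFC 4122 variant bits.
	u4 := uuid.NewV4()
	fmt.Println(u4.Variant() == uuid.VariantRFC4122) // true
}
```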
diff --git a/vendor/github.com/satori/go.uuid/codec.go b/vendor/github.com/satori/go.uuid/codec.go new file mode 100644 index 00000000..656892c5 --- /dev/null +++ b/vendor/github.com/satori/go.uuid/codec.go @@ -0,0 +1,206 @@ +// Copyright (C) 2013-2018 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package uuid + +import ( + "bytes" + "encoding/hex" + "fmt" +) + +// FromBytes returns UUID converted from raw byte slice input. +// It will return error if the slice isn't 16 bytes long. +func FromBytes(input []byte) (u UUID, err error) { + err = u.UnmarshalBinary(input) + return +} + +// FromBytesOrNil returns UUID converted from raw byte slice input. +// Same behavior as FromBytes, but returns a Nil UUID on error. +func FromBytesOrNil(input []byte) UUID { + uuid, err := FromBytes(input) + if err != nil { + return Nil + } + return uuid +} + +// FromString returns UUID parsed from string input. +// Input is expected in a form accepted by UnmarshalText. +func FromString(input string) (u UUID, err error) { + err = u.UnmarshalText([]byte(input)) + return +} + +// FromStringOrNil returns UUID parsed from string input. +// Same behavior as FromString, but returns a Nil UUID on error. +func FromStringOrNil(input string) UUID { + uuid, err := FromString(input) + if err != nil { + return Nil + } + return uuid +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The encoding is the same as returned by String. +func (u UUID) MarshalText() (text []byte, err error) { + text = []byte(u.String()) + return +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. 
+// Following formats are supported:
+// "6ba7b810-9dad-11d1-80b4-00c04fd430c8",
+// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",
+// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8"
+// "6ba7b8109dad11d180b400c04fd430c8"
+// ABNF for supported UUID text representation follows:
+// uuid := canonical | hashlike | braced | urn
+// plain := canonical | hashlike
+// canonical := 4hexoct '-' 2hexoct '-' 2hexoct '-' 6hexoct
+// hashlike := 12hexoct
+// braced := '{' plain '}'
+// urn := URN ':' UUID-NID ':' plain
+// URN := 'urn'
+// UUID-NID := 'uuid'
+// 12hexoct := 6hexoct 6hexoct
+// 6hexoct := 4hexoct 2hexoct
+// 4hexoct := 2hexoct 2hexoct
+// 2hexoct := hexoct hexoct
+// hexoct := hexdig hexdig
+// hexdig := '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' |
+// 'a' | 'b' | 'c' | 'd' | 'e' | 'f' |
+// 'A' | 'B' | 'C' | 'D' | 'E' | 'F'
+func (u *UUID) UnmarshalText(text []byte) (err error) {
+ switch len(text) {
+ case 32:
+ return u.decodeHashLike(text)
+ case 36:
+ return u.decodeCanonical(text)
+ case 38:
+ return u.decodeBraced(text)
+ case 41:
+ fallthrough
+ case 45:
+ return u.decodeURN(text)
+ default:
+ return fmt.Errorf("uuid: incorrect UUID length: %s", text)
+ }
+}
+
+// decodeCanonical decodes UUID string in format
+// "6ba7b810-9dad-11d1-80b4-00c04fd430c8".
+func (u *UUID) decodeCanonical(t []byte) (err error) {
+ if t[8] != '-' || t[13] != '-' || t[18] != '-' || t[23] != '-' {
+ return fmt.Errorf("uuid: incorrect UUID format %s", t)
+ }
+
+ src := t[:]
+ dst := u[:]
+
+ for i, byteGroup := range byteGroups {
+ if i > 0 {
+ src = src[1:] // skip dash
+ }
+ _, err = hex.Decode(dst[:byteGroup/2], src[:byteGroup])
+ if err != nil {
+ return
+ }
+ src = src[byteGroup:]
+ dst = dst[byteGroup/2:]
+ }
+
+ return
+}
+
+// decodeHashLike decodes UUID string in format
+// "6ba7b8109dad11d180b400c04fd430c8".
+func (u *UUID) decodeHashLike(t []byte) (err error) {
+ src := t[:]
+ dst := u[:]
+
+ if _, err = hex.Decode(dst, src); err != nil {
+ return err
+ }
+ return
+}
+
+// decodeBraced decodes UUID string in format
+// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}" or in format
+// "{6ba7b8109dad11d180b400c04fd430c8}".
+func (u *UUID) decodeBraced(t []byte) (err error) {
+ l := len(t)
+
+ if t[0] != '{' || t[l-1] != '}' {
+ return fmt.Errorf("uuid: incorrect UUID format %s", t)
+ }
+
+ return u.decodePlain(t[1 : l-1])
+}
+
+// decodeURN decodes UUID string in format
+// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" or in format
+// "urn:uuid:6ba7b8109dad11d180b400c04fd430c8".
+func (u *UUID) decodeURN(t []byte) (err error) {
+ total := len(t)
+
+ urn_uuid_prefix := t[:9]
+
+ if !bytes.Equal(urn_uuid_prefix, urnPrefix) {
+ return fmt.Errorf("uuid: incorrect UUID format: %s", t)
+ }
+
+ return u.decodePlain(t[9:total])
+}
+
+// decodePlain decodes UUID string in canonical format
+// "6ba7b810-9dad-11d1-80b4-00c04fd430c8" or in hash-like format
+// "6ba7b8109dad11d180b400c04fd430c8".
+func (u *UUID) decodePlain(t []byte) (err error) {
+ switch len(t) {
+ case 32:
+ return u.decodeHashLike(t)
+ case 36:
+ return u.decodeCanonical(t)
+ default:
+ return fmt.Errorf("uuid: incorrect UUID length: %s", t)
+ }
+}
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+func (u UUID) MarshalBinary() (data []byte, err error) {
+ data = u.Bytes()
+ return
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+// It will return an error if the slice isn't 16 bytes long.
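+// For example, a 15-byte input fails with "uuid: UUID must be exactly 16 bytes long, got 15 bytes".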
+func (u *UUID) UnmarshalBinary(data []byte) (err error) { + if len(data) != Size { + err = fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data)) + return + } + copy(u[:], data) + + return +} diff --git a/vendor/github.com/satori/go.uuid/generator.go b/vendor/github.com/satori/go.uuid/generator.go new file mode 100644 index 00000000..3f2f1da2 --- /dev/null +++ b/vendor/github.com/satori/go.uuid/generator.go @@ -0,0 +1,239 @@ +// Copyright (C) 2013-2018 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package uuid + +import ( + "crypto/md5" + "crypto/rand" + "crypto/sha1" + "encoding/binary" + "hash" + "net" + "os" + "sync" + "time" +) + +// Difference in 100-nanosecond intervals between +// UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970). +const epochStart = 122192928000000000 + +var ( + global = newDefaultGenerator() + + epochFunc = unixTimeFunc + posixUID = uint32(os.Getuid()) + posixGID = uint32(os.Getgid()) +) + +// NewV1 returns UUID based on current timestamp and MAC address. +func NewV1() UUID { + return global.NewV1() +} + +// NewV2 returns DCE Security UUID based on POSIX UID/GID. +func NewV2(domain byte) UUID { + return global.NewV2(domain) +} + +// NewV3 returns UUID based on MD5 hash of namespace UUID and name. +func NewV3(ns UUID, name string) UUID { + return global.NewV3(ns, name) +} + +// NewV4 returns random generated UUID. +func NewV4() UUID { + return global.NewV4() +} + +// NewV5 returns UUID based on SHA-1 hash of namespace UUID and name. +func NewV5(ns UUID, name string) UUID { + return global.NewV5(ns, name) +} + +// Generator provides interface for generating UUIDs. +type Generator interface { + NewV1() UUID + NewV2(domain byte) UUID + NewV3(ns UUID, name string) UUID + NewV4() UUID + NewV5(ns UUID, name string) UUID +} + +// Default generator implementation. +type generator struct { + storageOnce sync.Once + storageMutex sync.Mutex + + lastTime uint64 + clockSequence uint16 + hardwareAddr [6]byte +} + +func newDefaultGenerator() Generator { + return &generator{} +} + +// NewV1 returns UUID based on current timestamp and MAC address. 
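+// The layout follows RFC 4122: time-low (4 bytes), time-mid (2), time-high-and-version (2), clock sequence (2), node (6).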
+func (g *generator) NewV1() UUID { + u := UUID{} + + timeNow, clockSeq, hardwareAddr := g.getStorage() + + binary.BigEndian.PutUint32(u[0:], uint32(timeNow)) + binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32)) + binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48)) + binary.BigEndian.PutUint16(u[8:], clockSeq) + + copy(u[10:], hardwareAddr) + + u.SetVersion(V1) + u.SetVariant(VariantRFC4122) + + return u +} + +// NewV2 returns DCE Security UUID based on POSIX UID/GID. +func (g *generator) NewV2(domain byte) UUID { + u := UUID{} + + timeNow, clockSeq, hardwareAddr := g.getStorage() + + switch domain { + case DomainPerson: + binary.BigEndian.PutUint32(u[0:], posixUID) + case DomainGroup: + binary.BigEndian.PutUint32(u[0:], posixGID) + } + + binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32)) + binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48)) + binary.BigEndian.PutUint16(u[8:], clockSeq) + u[9] = domain + + copy(u[10:], hardwareAddr) + + u.SetVersion(V2) + u.SetVariant(VariantRFC4122) + + return u +} + +// NewV3 returns UUID based on MD5 hash of namespace UUID and name. +func (g *generator) NewV3(ns UUID, name string) UUID { + u := newFromHash(md5.New(), ns, name) + u.SetVersion(V3) + u.SetVariant(VariantRFC4122) + + return u +} + +// NewV4 returns random generated UUID. +func (g *generator) NewV4() UUID { + u := UUID{} + g.safeRandom(u[:]) + u.SetVersion(V4) + u.SetVariant(VariantRFC4122) + + return u +} + +// NewV5 returns UUID based on SHA-1 hash of namespace UUID and name. +func (g *generator) NewV5(ns UUID, name string) UUID { + u := newFromHash(sha1.New(), ns, name) + u.SetVersion(V5) + u.SetVariant(VariantRFC4122) + + return u +} + +func (g *generator) initStorage() { + g.initClockSequence() + g.initHardwareAddr() +} + +func (g *generator) initClockSequence() { + buf := make([]byte, 2) + g.safeRandom(buf) + g.clockSequence = binary.BigEndian.Uint16(buf) +} + +func (g *generator) initHardwareAddr() { + interfaces, err := net.Interfaces() + if err == nil { + for _, iface := range interfaces { + if len(iface.HardwareAddr) >= 6 { + copy(g.hardwareAddr[:], iface.HardwareAddr) + return + } + } + } + + // Initialize hardwareAddr randomly in case + // of real network interfaces absence + g.safeRandom(g.hardwareAddr[:]) + + // Set multicast bit as recommended in RFC 4122 + g.hardwareAddr[0] |= 0x01 +} + +func (g *generator) safeRandom(dest []byte) { + if _, err := rand.Read(dest); err != nil { + panic(err) + } +} + +// Returns UUID v1/v2 storage state. +// Returns epoch timestamp, clock sequence, and hardware address. +func (g *generator) getStorage() (uint64, uint16, []byte) { + g.storageOnce.Do(g.initStorage) + + g.storageMutex.Lock() + defer g.storageMutex.Unlock() + + timeNow := epochFunc() + // Clock changed backwards since last UUID generation. + // Should increase clock sequence. + if timeNow <= g.lastTime { + g.clockSequence++ + } + g.lastTime = timeNow + + return timeNow, g.clockSequence, g.hardwareAddr[:] +} + +// Returns difference in 100-nanosecond intervals between +// UUID epoch (October 15, 1582) and current time. +// This is default epoch calculation function. +func unixTimeFunc() uint64 { + return epochStart + uint64(time.Now().UnixNano()/100) +} + +// Returns UUID based on hashing of namespace UUID and name. 
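+// The namespace bytes are hashed first, then the name; the first 16 bytes of the digest become the UUID.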
+func newFromHash(h hash.Hash, ns UUID, name string) UUID { + u := UUID{} + h.Write(ns[:]) + h.Write([]byte(name)) + copy(u[:], h.Sum(nil)) + + return u +} diff --git a/vendor/github.com/satori/go.uuid/sql.go b/vendor/github.com/satori/go.uuid/sql.go new file mode 100644 index 00000000..56759d39 --- /dev/null +++ b/vendor/github.com/satori/go.uuid/sql.go @@ -0,0 +1,78 @@ +// Copyright (C) 2013-2018 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package uuid + +import ( + "database/sql/driver" + "fmt" +) + +// Value implements the driver.Valuer interface. +func (u UUID) Value() (driver.Value, error) { + return u.String(), nil +} + +// Scan implements the sql.Scanner interface. +// A 16-byte slice is handled by UnmarshalBinary, while +// a longer byte slice or a string is handled by UnmarshalText. +func (u *UUID) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + if len(src) == Size { + return u.UnmarshalBinary(src) + } + return u.UnmarshalText(src) + + case string: + return u.UnmarshalText([]byte(src)) + } + + return fmt.Errorf("uuid: cannot convert %T to UUID", src) +} + +// NullUUID can be used with the standard sql package to represent a +// UUID value that can be NULL in the database +type NullUUID struct { + UUID UUID + Valid bool +} + +// Value implements the driver.Valuer interface. +func (u NullUUID) Value() (driver.Value, error) { + if !u.Valid { + return nil, nil + } + // Delegate to UUID Value function + return u.UUID.Value() +} + +// Scan implements the sql.Scanner interface. 
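+// A nil source yields {UUID: Nil, Valid: false}; any non-nil value is delegated to UUID.Scan.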
+func (u *NullUUID) Scan(src interface{}) error {
+ if src == nil {
+ u.UUID, u.Valid = Nil, false
+ return nil
+ }
+
+ // Delegate to UUID Scan function
+ u.Valid = true
+ return u.UUID.Scan(src)
+}
diff --git a/vendor/github.com/satori/go.uuid/uuid.go b/vendor/github.com/satori/go.uuid/uuid.go
new file mode 100644
index 00000000..a2b8e2ca
--- /dev/null
+++ b/vendor/github.com/satori/go.uuid/uuid.go
@@ -0,0 +1,161 @@
+// Copyright (C) 2013-2018 by Maxim Bublis
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the
+// "Software"), to deal in the Software without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to
+// the following conditions:
+//
+// The above copyright notice and this permission notice shall be
+// included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+// Package uuid provides an implementation of Universally Unique Identifiers (UUID).
+// Supported versions are 1, 3, 4 and 5 (as specified in RFC 4122) and
+// version 2 (as specified in DCE 1.1).
+package uuid
+
+import (
+ "bytes"
+ "encoding/hex"
+)
+
+// Size of a UUID in bytes.
+const Size = 16
+
+// UUID representation compliant with specification
+// described in RFC 4122.
+type UUID [Size]byte
+
+// UUID versions
+const (
+ _ byte = iota
+ V1
+ V2
+ V3
+ V4
+ V5
+)
+
+// UUID layout variants.
+const (
+ VariantNCS byte = iota
+ VariantRFC4122
+ VariantMicrosoft
+ VariantFuture
+)
+
+// UUID DCE domains.
+const (
+ DomainPerson = iota
+ DomainGroup
+ DomainOrg
+)
+
+// String parse helpers.
+var (
+ urnPrefix = []byte("urn:uuid:")
+ byteGroups = []int{8, 4, 4, 4, 12}
+)
+
+// Nil is a special form of UUID that is specified to have all
+// 128 bits set to zero.
+var Nil = UUID{}
+
+// Predefined namespace UUIDs.
+var (
+ NamespaceDNS = Must(FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
+ NamespaceURL = Must(FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))
+ NamespaceOID = Must(FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
+ NamespaceX500 = Must(FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
+)
+
+// Equal returns true if u1 and u2 are equal, otherwise returns false.
+func Equal(u1 UUID, u2 UUID) bool {
+ return bytes.Equal(u1[:], u2[:])
+}
+
+// Version returns the algorithm version used to generate the UUID.
+func (u UUID) Version() byte {
+ return u[6] >> 4
+}
+
+// Variant returns the UUID layout variant.
+func (u UUID) Variant() byte {
+ switch {
+ case (u[8] >> 7) == 0x00:
+ return VariantNCS
+ case (u[8] >> 6) == 0x02:
+ return VariantRFC4122
+ case (u[8] >> 5) == 0x06:
+ return VariantMicrosoft
+ case (u[8] >> 5) == 0x07:
+ fallthrough
+ default:
+ return VariantFuture
+ }
+}
+
+// Bytes returns a byte slice representation of the UUID.
+func (u UUID) Bytes() []byte { + return u[:] +} + +// Returns canonical string representation of UUID: +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx. +func (u UUID) String() string { + buf := make([]byte, 36) + + hex.Encode(buf[0:8], u[0:4]) + buf[8] = '-' + hex.Encode(buf[9:13], u[4:6]) + buf[13] = '-' + hex.Encode(buf[14:18], u[6:8]) + buf[18] = '-' + hex.Encode(buf[19:23], u[8:10]) + buf[23] = '-' + hex.Encode(buf[24:], u[10:]) + + return string(buf) +} + +// SetVersion sets version bits. +func (u *UUID) SetVersion(v byte) { + u[6] = (u[6] & 0x0f) | (v << 4) +} + +// SetVariant sets variant bits. +func (u *UUID) SetVariant(v byte) { + switch v { + case VariantNCS: + u[8] = (u[8]&(0xff>>1) | (0x00 << 7)) + case VariantRFC4122: + u[8] = (u[8]&(0xff>>2) | (0x02 << 6)) + case VariantMicrosoft: + u[8] = (u[8]&(0xff>>3) | (0x06 << 5)) + case VariantFuture: + fallthrough + default: + u[8] = (u[8]&(0xff>>3) | (0x07 << 5)) + } +} + +// Must is a helper that wraps a call to a function returning (UUID, error) +// and panics if the error is non-nil. It is intended for use in variable +// initializations such as +// var packageUUID = uuid.Must(uuid.FromString("123e4567-e89b-12d3-a456-426655440000")); +func Must(u UUID, err error) UUID { + if err != nil { + panic(err) + } + return u +} diff --git a/vendor/golang.org/x/crypto/otr/libotr_test_helper.c b/vendor/golang.org/x/crypto/otr/libotr_test_helper.c new file mode 100644 index 00000000..6423eeda --- /dev/null +++ b/vendor/golang.org/x/crypto/otr/libotr_test_helper.c @@ -0,0 +1,171 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This code can be compiled and used to test the otr package against libotr. +// See otr_test.go. 
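+// The helper reads OTR messages from stdin and writes its replies to stdout.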
+ +// +build ignore + +#include +#include +#include + +#include +#include + +static int g_session_established = 0; + +OtrlPolicy policy(void *opdata, ConnContext *context) { + return OTRL_POLICY_ALWAYS; +} + +int is_logged_in(void *opdata, const char *accountname, const char *protocol, const char *recipient) { + return 1; +} + +void inject_message(void *opdata, const char *accountname, const char *protocol, const char *recipient, const char *message) { + printf("%s\n", message); + fflush(stdout); + fprintf(stderr, "libotr helper sent: %s\n", message); +} + +void notify(void *opdata, OtrlNotifyLevel level, const char *accountname, const char *protocol, const char *username, const char *title, const char *primary, const char *secondary) { + fprintf(stderr, "NOTIFY: %s %s %s %s\n", username, title, primary, secondary); +} + +int display_otr_message(void *opdata, const char *accountname, const char *protocol, const char *username, const char *msg) { + fprintf(stderr, "MESSAGE: %s %s\n", username, msg); + return 1; +} + +void update_context_list(void *opdata) { +} + +const char *protocol_name(void *opdata, const char *protocol) { + return "PROTOCOL"; +} + +void protocol_name_free(void *opdata, const char *protocol_name) { +} + +void new_fingerprint(void *opdata, OtrlUserState us, const char *accountname, const char *protocol, const char *username, unsigned char fingerprint[20]) { + fprintf(stderr, "NEW FINGERPRINT\n"); + g_session_established = 1; +} + +void write_fingerprints(void *opdata) { +} + +void gone_secure(void *opdata, ConnContext *context) { +} + +void gone_insecure(void *opdata, ConnContext *context) { +} + +void still_secure(void *opdata, ConnContext *context, int is_reply) { +} + +void log_message(void *opdata, const char *message) { + fprintf(stderr, "MESSAGE: %s\n", message); +} + +int max_message_size(void *opdata, ConnContext *context) { + return 99999; +} + +const char *account_name(void *opdata, const char *account, const char *protocol) { + return "ACCOUNT"; +} + +void account_name_free(void *opdata, const char *account_name) { +} + +OtrlMessageAppOps uiops = { + policy, + NULL, + is_logged_in, + inject_message, + notify, + display_otr_message, + update_context_list, + protocol_name, + protocol_name_free, + new_fingerprint, + write_fingerprints, + gone_secure, + gone_insecure, + still_secure, + log_message, + max_message_size, + account_name, + account_name_free, +}; + +static const char kPrivateKeyData[] = "(privkeys (account (name \"account\") (protocol proto) (private-key (dsa (p #00FC07ABCF0DC916AFF6E9AE47BEF60C7AB9B4D6B2469E436630E36F8A489BE812486A09F30B71224508654940A835301ACC525A4FF133FC152CC53DCC59D65C30A54F1993FE13FE63E5823D4C746DB21B90F9B9C00B49EC7404AB1D929BA7FBA12F2E45C6E0A651689750E8528AB8C031D3561FECEE72EBB4A090D450A9B7A857#) (q #00997BD266EF7B1F60A5C23F3A741F2AEFD07A2081#) (g #535E360E8A95EBA46A4F7DE50AD6E9B2A6DB785A66B64EB9F20338D2A3E8FB0E94725848F1AA6CC567CB83A1CC517EC806F2E92EAE71457E80B2210A189B91250779434B41FC8A8873F6DB94BEA7D177F5D59E7E114EE10A49CFD9CEF88AE43387023B672927BA74B04EB6BBB5E57597766A2F9CE3857D7ACE3E1E3BC1FC6F26#) (y #0AC8670AD767D7A8D9D14CC1AC6744CD7D76F993B77FFD9E39DF01E5A6536EF65E775FCEF2A983E2A19BD6415500F6979715D9FD1257E1FE2B6F5E1E74B333079E7C880D39868462A93454B41877BE62E5EF0A041C2EE9C9E76BD1E12AE25D9628DECB097025DD625EF49C3258A1A3C0FF501E3DC673B76D7BABF349009B6ECF#) (x #14D0345A3562C480A039E3C72764F72D79043216#)))))\n"; + +int +main() { + OTRL_INIT; + + // We have to write the private key information to a file because the libotr + // 
API demands a filename to read from. + const char *tmpdir = "/tmp"; + if (getenv("TMP")) { + tmpdir = getenv("TMP"); + } + + char private_key_file[256]; + snprintf(private_key_file, sizeof(private_key_file), "%s/libotr_test_helper_privatekeys-XXXXXX", tmpdir); + int fd = mkstemp(private_key_file); + if (fd == -1) { + perror("creating temp file"); + } + write(fd, kPrivateKeyData, sizeof(kPrivateKeyData)-1); + close(fd); + + OtrlUserState userstate = otrl_userstate_create(); + otrl_privkey_read(userstate, private_key_file); + unlink(private_key_file); + + fprintf(stderr, "libotr helper started\n"); + + char buf[4096]; + + for (;;) { + char* message = fgets(buf, sizeof(buf), stdin); + if (strlen(message) == 0) { + break; + } + message[strlen(message) - 1] = 0; + fprintf(stderr, "libotr helper got: %s\n", message); + + char *newmessage = NULL; + OtrlTLV *tlvs; + int ignore_message = otrl_message_receiving(userstate, &uiops, NULL, "account", "proto", "peer", message, &newmessage, &tlvs, NULL, NULL); + if (tlvs) { + otrl_tlv_free(tlvs); + } + + if (newmessage != NULL) { + fprintf(stderr, "libotr got: %s\n", newmessage); + otrl_message_free(newmessage); + + gcry_error_t err; + char *newmessage = NULL; + + err = otrl_message_sending(userstate, &uiops, NULL, "account", "proto", "peer", "test message", NULL, &newmessage, NULL, NULL); + if (newmessage == NULL) { + fprintf(stderr, "libotr didn't encrypt message\n"); + return 1; + } + write(1, newmessage, strlen(newmessage)); + write(1, "\n", 1); + g_session_established = 0; + otrl_message_free(newmessage); + write(1, "?OTRv2?\n", 8); + } + } + + return 0; +} diff --git a/vendor/golang.org/x/crypto/otr/otr.go b/vendor/golang.org/x/crypto/otr/otr.go new file mode 100644 index 00000000..07ac080e --- /dev/null +++ b/vendor/golang.org/x/crypto/otr/otr.go @@ -0,0 +1,1400 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package otr implements the Off The Record protocol as specified in +// http://www.cypherpunks.ca/otr/Protocol-v2-3.1.0.html +package otr // import "golang.org/x/crypto/otr" + +import ( + "bytes" + "crypto/aes" + "crypto/cipher" + "crypto/dsa" + "crypto/hmac" + "crypto/rand" + "crypto/sha1" + "crypto/sha256" + "crypto/subtle" + "encoding/base64" + "encoding/hex" + "errors" + "hash" + "io" + "math/big" + "strconv" +) + +// SecurityChange describes a change in the security state of a Conversation. +type SecurityChange int + +const ( + NoChange SecurityChange = iota + // NewKeys indicates that a key exchange has completed. This occurs + // when a conversation first becomes encrypted, and when the keys are + // renegotiated within an encrypted conversation. + NewKeys + // SMPSecretNeeded indicates that the peer has started an + // authentication and that we need to supply a secret. Call SMPQuestion + // to get the optional, human readable challenge and then Authenticate + // to supply the matching secret. + SMPSecretNeeded + // SMPComplete indicates that an authentication completed. The identity + // of the peer has now been confirmed. + SMPComplete + // SMPFailed indicates that an authentication failed. + SMPFailed + // ConversationEnded indicates that the peer ended the secure + // conversation. + ConversationEnded +) + +// QueryMessage can be sent to a peer to start an OTR conversation. +var QueryMessage = "?OTRv2?" + +// ErrorPrefix can be used to make an OTR error by appending an error message +// to it. 
+var ErrorPrefix = "?OTR Error:" + +var ( + fragmentPartSeparator = []byte(",") + fragmentPrefix = []byte("?OTR,") + msgPrefix = []byte("?OTR:") + queryMarker = []byte("?OTR") +) + +// isQuery attempts to parse an OTR query from msg and returns the greatest +// common version, or 0 if msg is not an OTR query. +func isQuery(msg []byte) (greatestCommonVersion int) { + pos := bytes.Index(msg, queryMarker) + if pos == -1 { + return 0 + } + for i, c := range msg[pos+len(queryMarker):] { + if i == 0 { + if c == '?' { + // Indicates support for version 1, but we don't + // implement that. + continue + } + + if c != 'v' { + // Invalid message + return 0 + } + + continue + } + + if c == '?' { + // End of message + return + } + + if c == ' ' || c == '\t' { + // Probably an invalid message + return 0 + } + + if c == '2' { + greatestCommonVersion = 2 + } + } + + return 0 +} + +const ( + statePlaintext = iota + stateEncrypted + stateFinished +) + +const ( + authStateNone = iota + authStateAwaitingDHKey + authStateAwaitingRevealSig + authStateAwaitingSig +) + +const ( + msgTypeDHCommit = 2 + msgTypeData = 3 + msgTypeDHKey = 10 + msgTypeRevealSig = 17 + msgTypeSig = 18 +) + +const ( + // If the requested fragment size is less than this, it will be ignored. + minFragmentSize = 18 + // Messages are padded to a multiple of this number of bytes. + paddingGranularity = 256 + // The number of bytes in a Diffie-Hellman private value (320-bits). + dhPrivateBytes = 40 + // The number of bytes needed to represent an element of the DSA + // subgroup (160-bits). + dsaSubgroupBytes = 20 + // The number of bytes of the MAC that are sent on the wire (160-bits). + macPrefixBytes = 20 +) + +// These are the global, common group parameters for OTR. +var ( + p *big.Int // group prime + g *big.Int // group generator + q *big.Int // group order + pMinus2 *big.Int +) + +func init() { + p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF", 16) + q, _ = new(big.Int).SetString("7FFFFFFFFFFFFFFFE487ED5110B4611A62633145C06E0E68948127044533E63A0105DF531D89CD9128A5043CC71A026EF7CA8CD9E69D218D98158536F92F8A1BA7F09AB6B6A8E122F242DABB312F3F637A262174D31BF6B585FFAE5B7A035BF6F71C35FDAD44CFD2D74F9208BE258FF324943328F6722D9EE1003E5C50B1DF82CC6D241B0E2AE9CD348B1FD47E9267AFC1B2AE91EE51D6CB0E3179AB1042A95DCF6A9483B84B4B36B3861AA7255E4C0278BA36046511B993FFFFFFFFFFFFFFFF", 16) + g = new(big.Int).SetInt64(2) + pMinus2 = new(big.Int).Sub(p, g) +} + +// Conversation represents a relation with a peer. The zero value is a valid +// Conversation, although PrivateKey must be set. +// +// When communicating with a peer, all inbound messages should be passed to +// Conversation.Receive and all outbound messages to Conversation.Send. The +// Conversation will take care of maintaining the encryption state and +// negotiating encryption as needed. +type Conversation struct { + // PrivateKey contains the private key to use to sign key exchanges. + PrivateKey *PrivateKey + + // Rand can be set to override the entropy source. Otherwise, + // crypto/rand will be used. 
+ Rand io.Reader + // If FragmentSize is set, all messages produced by Receive and Send + // will be fragmented into messages of, at most, this number of bytes. + FragmentSize int + + // Once Receive has returned NewKeys once, the following fields are + // valid. + SSID [8]byte + TheirPublicKey PublicKey + + state, authState int + + r [16]byte + x, y *big.Int + gx, gy *big.Int + gxBytes []byte + digest [sha256.Size]byte + + revealKeys, sigKeys akeKeys + + myKeyId uint32 + myCurrentDHPub *big.Int + myCurrentDHPriv *big.Int + myLastDHPub *big.Int + myLastDHPriv *big.Int + + theirKeyId uint32 + theirCurrentDHPub *big.Int + theirLastDHPub *big.Int + + keySlots [4]keySlot + + myCounter [8]byte + theirLastCtr [8]byte + oldMACs []byte + + k, n int // fragment state + frag []byte + + smp smpState +} + +// A keySlot contains key material for a specific (their keyid, my keyid) pair. +type keySlot struct { + // used is true if this slot is valid. If false, it's free for reuse. + used bool + theirKeyId uint32 + myKeyId uint32 + sendAESKey, recvAESKey []byte + sendMACKey, recvMACKey []byte + theirLastCtr [8]byte +} + +// akeKeys are generated during key exchange. There's one set for the reveal +// signature message and another for the signature message. In the protocol +// spec the latter are indicated with a prime mark. +type akeKeys struct { + c [16]byte + m1, m2 [32]byte +} + +func (c *Conversation) rand() io.Reader { + if c.Rand != nil { + return c.Rand + } + return rand.Reader +} + +func (c *Conversation) randMPI(buf []byte) *big.Int { + _, err := io.ReadFull(c.rand(), buf) + if err != nil { + panic("otr: short read from random source") + } + + return new(big.Int).SetBytes(buf) +} + +// tlv represents the type-length value from the protocol. +type tlv struct { + typ, length uint16 + data []byte +} + +const ( + tlvTypePadding = 0 + tlvTypeDisconnected = 1 + tlvTypeSMP1 = 2 + tlvTypeSMP2 = 3 + tlvTypeSMP3 = 4 + tlvTypeSMP4 = 5 + tlvTypeSMPAbort = 6 + tlvTypeSMP1WithQuestion = 7 +) + +// Receive handles a message from a peer. It returns a human readable message, +// an indicator of whether that message was encrypted, a hint about the +// encryption state and zero or more messages to send back to the peer. +// These messages do not need to be passed to Send before transmission. +func (c *Conversation) Receive(in []byte) (out []byte, encrypted bool, change SecurityChange, toSend [][]byte, err error) { + if bytes.HasPrefix(in, fragmentPrefix) { + in, err = c.processFragment(in) + if in == nil || err != nil { + return + } + } + + if bytes.HasPrefix(in, msgPrefix) && in[len(in)-1] == '.' 
{ + in = in[len(msgPrefix) : len(in)-1] + } else if version := isQuery(in); version > 0 { + c.authState = authStateAwaitingDHKey + c.myKeyId = 0 + toSend = c.encode(c.generateDHCommit()) + return + } else { + // plaintext message + out = in + return + } + + msg := make([]byte, base64.StdEncoding.DecodedLen(len(in))) + msgLen, err := base64.StdEncoding.Decode(msg, in) + if err != nil { + err = errors.New("otr: invalid base64 encoding in message") + return + } + msg = msg[:msgLen] + + // The first two bytes are the protocol version (2) + if len(msg) < 3 || msg[0] != 0 || msg[1] != 2 { + err = errors.New("otr: invalid OTR message") + return + } + + msgType := int(msg[2]) + msg = msg[3:] + + switch msgType { + case msgTypeDHCommit: + switch c.authState { + case authStateNone: + c.authState = authStateAwaitingRevealSig + if err = c.processDHCommit(msg); err != nil { + return + } + c.myKeyId = 0 + toSend = c.encode(c.generateDHKey()) + return + case authStateAwaitingDHKey: + // This is a 'SYN-crossing'. The greater digest wins. + var cmp int + if cmp, err = c.compareToDHCommit(msg); err != nil { + return + } + if cmp > 0 { + // We win. Retransmit DH commit. + toSend = c.encode(c.serializeDHCommit()) + return + } else { + // They win. We forget about our DH commit. + c.authState = authStateAwaitingRevealSig + if err = c.processDHCommit(msg); err != nil { + return + } + c.myKeyId = 0 + toSend = c.encode(c.generateDHKey()) + return + } + case authStateAwaitingRevealSig: + if err = c.processDHCommit(msg); err != nil { + return + } + toSend = c.encode(c.serializeDHKey()) + case authStateAwaitingSig: + if err = c.processDHCommit(msg); err != nil { + return + } + c.myKeyId = 0 + toSend = c.encode(c.generateDHKey()) + c.authState = authStateAwaitingRevealSig + default: + panic("bad state") + } + case msgTypeDHKey: + switch c.authState { + case authStateAwaitingDHKey: + var isSame bool + if isSame, err = c.processDHKey(msg); err != nil { + return + } + if isSame { + err = errors.New("otr: unexpected duplicate DH key") + return + } + toSend = c.encode(c.generateRevealSig()) + c.authState = authStateAwaitingSig + case authStateAwaitingSig: + var isSame bool + if isSame, err = c.processDHKey(msg); err != nil { + return + } + if isSame { + toSend = c.encode(c.serializeDHKey()) + } + } + case msgTypeRevealSig: + if c.authState != authStateAwaitingRevealSig { + return + } + if err = c.processRevealSig(msg); err != nil { + return + } + toSend = c.encode(c.generateSig()) + c.authState = authStateNone + c.state = stateEncrypted + change = NewKeys + case msgTypeSig: + if c.authState != authStateAwaitingSig { + return + } + if err = c.processSig(msg); err != nil { + return + } + c.authState = authStateNone + c.state = stateEncrypted + change = NewKeys + case msgTypeData: + if c.state != stateEncrypted { + err = errors.New("otr: encrypted message received without encrypted session established") + return + } + var tlvs []tlv + out, tlvs, err = c.processData(msg) + encrypted = true + + EachTLV: + for _, inTLV := range tlvs { + switch inTLV.typ { + case tlvTypeDisconnected: + change = ConversationEnded + c.state = stateFinished + break EachTLV + case tlvTypeSMP1, tlvTypeSMP2, tlvTypeSMP3, tlvTypeSMP4, tlvTypeSMPAbort, tlvTypeSMP1WithQuestion: + var reply tlv + var complete bool + reply, complete, err = c.processSMP(inTLV) + if err == smpSecretMissingError { + err = nil + change = SMPSecretNeeded + c.smp.saved = &inTLV + return + } else if err == smpFailureError { + err = nil + change = SMPFailed + return + } + if 
complete { + change = SMPComplete + } + if reply.typ != 0 { + toSend = c.encode(c.generateData(nil, &reply)) + } + break EachTLV + default: + // skip unknown TLVs + } + } + default: + err = errors.New("otr: unknown message type " + strconv.Itoa(msgType)) + } + + return +} + +// Send takes a human readable message from the local user, possibly encrypts +// it and returns zero one or more messages to send to the peer. +func (c *Conversation) Send(msg []byte) ([][]byte, error) { + switch c.state { + case statePlaintext: + return [][]byte{msg}, nil + case stateEncrypted: + return c.encode(c.generateData(msg, nil)), nil + case stateFinished: + return nil, errors.New("otr: cannot send message because secure conversation has finished") + } + + return nil, errors.New("otr: cannot send message in current state") +} + +// SMPQuestion returns the human readable challenge question from the peer. +// It's only valid after Receive has returned SMPSecretNeeded. +func (c *Conversation) SMPQuestion() string { + return c.smp.question +} + +// Authenticate begins an authentication with the peer. Authentication involves +// an optional challenge message and a shared secret. The authentication +// proceeds until either Receive returns SMPComplete, SMPSecretNeeded (which +// indicates that a new authentication is happening and thus this one was +// aborted) or SMPFailed. +func (c *Conversation) Authenticate(question string, mutualSecret []byte) (toSend [][]byte, err error) { + if c.state != stateEncrypted { + err = errors.New("otr: can't authenticate a peer without a secure conversation established") + return + } + + if c.smp.saved != nil { + c.calcSMPSecret(mutualSecret, false /* they started it */) + + var out tlv + var complete bool + out, complete, err = c.processSMP(*c.smp.saved) + if complete { + panic("SMP completed on the first message") + } + c.smp.saved = nil + if out.typ != 0 { + toSend = c.encode(c.generateData(nil, &out)) + } + return + } + + c.calcSMPSecret(mutualSecret, true /* we started it */) + outs := c.startSMP(question) + for _, out := range outs { + toSend = append(toSend, c.encode(c.generateData(nil, &out))...) + } + return +} + +// End ends a secure conversation by generating a termination message for +// the peer and switches to unencrypted communication. +func (c *Conversation) End() (toSend [][]byte) { + switch c.state { + case statePlaintext: + return nil + case stateEncrypted: + c.state = statePlaintext + return c.encode(c.generateData(nil, &tlv{typ: tlvTypeDisconnected})) + case stateFinished: + c.state = statePlaintext + return nil + } + panic("unreachable") +} + +// IsEncrypted returns true if a message passed to Send would be encrypted +// before transmission. This result remains valid until the next call to +// Receive or End, which may change the state of the Conversation. +func (c *Conversation) IsEncrypted() bool { + return c.state == stateEncrypted +} + +var fragmentError = errors.New("otr: invalid OTR fragment") + +// processFragment processes a fragmented OTR message and possibly returns a +// complete message. Fragmented messages look like "?OTR,k,n,msg," where k is +// the fragment number (starting from 1), n is the number of fragments in this +// message and msg is a substring of the base64 encoded message. 
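+// For example, "?OTR,1,3,SGVsbG8s," would be fragment 1 of 3, carrying the
+// substring "SGVsbG8s" of the encoded message.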
+func (c *Conversation) processFragment(in []byte) (out []byte, err error) { + in = in[len(fragmentPrefix):] // remove "?OTR," + parts := bytes.Split(in, fragmentPartSeparator) + if len(parts) != 4 || len(parts[3]) != 0 { + return nil, fragmentError + } + + k, err := strconv.Atoi(string(parts[0])) + if err != nil { + return nil, fragmentError + } + + n, err := strconv.Atoi(string(parts[1])) + if err != nil { + return nil, fragmentError + } + + if k < 1 || n < 1 || k > n { + return nil, fragmentError + } + + if k == 1 { + c.frag = append(c.frag[:0], parts[2]...) + c.k, c.n = k, n + } else if n == c.n && k == c.k+1 { + c.frag = append(c.frag, parts[2]...) + c.k++ + } else { + c.frag = c.frag[:0] + c.n, c.k = 0, 0 + } + + if c.n > 0 && c.k == c.n { + c.n, c.k = 0, 0 + return c.frag, nil + } + + return nil, nil +} + +func (c *Conversation) generateDHCommit() []byte { + _, err := io.ReadFull(c.rand(), c.r[:]) + if err != nil { + panic("otr: short read from random source") + } + + var xBytes [dhPrivateBytes]byte + c.x = c.randMPI(xBytes[:]) + c.gx = new(big.Int).Exp(g, c.x, p) + c.gy = nil + c.gxBytes = appendMPI(nil, c.gx) + + h := sha256.New() + h.Write(c.gxBytes) + h.Sum(c.digest[:0]) + + aesCipher, err := aes.NewCipher(c.r[:]) + if err != nil { + panic(err.Error()) + } + + var iv [aes.BlockSize]byte + ctr := cipher.NewCTR(aesCipher, iv[:]) + ctr.XORKeyStream(c.gxBytes, c.gxBytes) + + return c.serializeDHCommit() +} + +func (c *Conversation) serializeDHCommit() []byte { + var ret []byte + ret = appendU16(ret, 2) // protocol version + ret = append(ret, msgTypeDHCommit) + ret = appendData(ret, c.gxBytes) + ret = appendData(ret, c.digest[:]) + return ret +} + +func (c *Conversation) processDHCommit(in []byte) error { + var ok1, ok2 bool + c.gxBytes, in, ok1 = getData(in) + digest, in, ok2 := getData(in) + if !ok1 || !ok2 || len(in) > 0 { + return errors.New("otr: corrupt DH commit message") + } + copy(c.digest[:], digest) + return nil +} + +func (c *Conversation) compareToDHCommit(in []byte) (int, error) { + _, in, ok1 := getData(in) + digest, in, ok2 := getData(in) + if !ok1 || !ok2 || len(in) > 0 { + return 0, errors.New("otr: corrupt DH commit message") + } + return bytes.Compare(c.digest[:], digest), nil +} + +func (c *Conversation) generateDHKey() []byte { + var yBytes [dhPrivateBytes]byte + c.y = c.randMPI(yBytes[:]) + c.gy = new(big.Int).Exp(g, c.y, p) + return c.serializeDHKey() +} + +func (c *Conversation) serializeDHKey() []byte { + var ret []byte + ret = appendU16(ret, 2) // protocol version + ret = append(ret, msgTypeDHKey) + ret = appendMPI(ret, c.gy) + return ret +} + +func (c *Conversation) processDHKey(in []byte) (isSame bool, err error) { + gy, in, ok := getMPI(in) + if !ok { + err = errors.New("otr: corrupt DH key message") + return + } + if gy.Cmp(g) < 0 || gy.Cmp(pMinus2) > 0 { + err = errors.New("otr: DH value out of range") + return + } + if c.gy != nil { + isSame = c.gy.Cmp(gy) == 0 + return + } + c.gy = gy + return +} + +func (c *Conversation) generateEncryptedSignature(keys *akeKeys, xFirst bool) ([]byte, []byte) { + var xb []byte + xb = c.PrivateKey.PublicKey.Serialize(xb) + + var verifyData []byte + if xFirst { + verifyData = appendMPI(verifyData, c.gx) + verifyData = appendMPI(verifyData, c.gy) + } else { + verifyData = appendMPI(verifyData, c.gy) + verifyData = appendMPI(verifyData, c.gx) + } + verifyData = append(verifyData, xb...) 
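+	// verifyData is now the two DH public values (in AKE order) followed
+	// by the serialized public key; the key id is appended below and the
+	// whole value is MACed with m1, per the v2 AKE.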
+ verifyData = appendU32(verifyData, c.myKeyId) + + mac := hmac.New(sha256.New, keys.m1[:]) + mac.Write(verifyData) + mb := mac.Sum(nil) + + xb = appendU32(xb, c.myKeyId) + xb = append(xb, c.PrivateKey.Sign(c.rand(), mb)...) + + aesCipher, err := aes.NewCipher(keys.c[:]) + if err != nil { + panic(err.Error()) + } + var iv [aes.BlockSize]byte + ctr := cipher.NewCTR(aesCipher, iv[:]) + ctr.XORKeyStream(xb, xb) + + mac = hmac.New(sha256.New, keys.m2[:]) + encryptedSig := appendData(nil, xb) + mac.Write(encryptedSig) + + return encryptedSig, mac.Sum(nil) +} + +func (c *Conversation) generateRevealSig() []byte { + s := new(big.Int).Exp(c.gy, c.x, p) + c.calcAKEKeys(s) + c.myKeyId++ + + encryptedSig, mac := c.generateEncryptedSignature(&c.revealKeys, true /* gx comes first */) + + c.myCurrentDHPub = c.gx + c.myCurrentDHPriv = c.x + c.rotateDHKeys() + incCounter(&c.myCounter) + + var ret []byte + ret = appendU16(ret, 2) + ret = append(ret, msgTypeRevealSig) + ret = appendData(ret, c.r[:]) + ret = append(ret, encryptedSig...) + ret = append(ret, mac[:20]...) + return ret +} + +func (c *Conversation) processEncryptedSig(encryptedSig, theirMAC []byte, keys *akeKeys, xFirst bool) error { + mac := hmac.New(sha256.New, keys.m2[:]) + mac.Write(appendData(nil, encryptedSig)) + myMAC := mac.Sum(nil)[:20] + + if len(myMAC) != len(theirMAC) || subtle.ConstantTimeCompare(myMAC, theirMAC) == 0 { + return errors.New("bad signature MAC in encrypted signature") + } + + aesCipher, err := aes.NewCipher(keys.c[:]) + if err != nil { + panic(err.Error()) + } + var iv [aes.BlockSize]byte + ctr := cipher.NewCTR(aesCipher, iv[:]) + ctr.XORKeyStream(encryptedSig, encryptedSig) + + sig := encryptedSig + sig, ok1 := c.TheirPublicKey.Parse(sig) + keyId, sig, ok2 := getU32(sig) + if !ok1 || !ok2 { + return errors.New("otr: corrupt encrypted signature") + } + + var verifyData []byte + if xFirst { + verifyData = appendMPI(verifyData, c.gx) + verifyData = appendMPI(verifyData, c.gy) + } else { + verifyData = appendMPI(verifyData, c.gy) + verifyData = appendMPI(verifyData, c.gx) + } + verifyData = c.TheirPublicKey.Serialize(verifyData) + verifyData = appendU32(verifyData, keyId) + + mac = hmac.New(sha256.New, keys.m1[:]) + mac.Write(verifyData) + mb := mac.Sum(nil) + + sig, ok1 = c.TheirPublicKey.Verify(mb, sig) + if !ok1 { + return errors.New("bad signature in encrypted signature") + } + if len(sig) > 0 { + return errors.New("corrupt encrypted signature") + } + + c.theirKeyId = keyId + zero(c.theirLastCtr[:]) + return nil +} + +func (c *Conversation) processRevealSig(in []byte) error { + r, in, ok1 := getData(in) + encryptedSig, in, ok2 := getData(in) + theirMAC := in + if !ok1 || !ok2 || len(theirMAC) != 20 { + return errors.New("otr: corrupt reveal signature message") + } + + aesCipher, err := aes.NewCipher(r) + if err != nil { + return errors.New("otr: cannot create AES cipher from reveal signature message: " + err.Error()) + } + var iv [aes.BlockSize]byte + ctr := cipher.NewCTR(aesCipher, iv[:]) + ctr.XORKeyStream(c.gxBytes, c.gxBytes) + h := sha256.New() + h.Write(c.gxBytes) + digest := h.Sum(nil) + if len(digest) != len(c.digest) || subtle.ConstantTimeCompare(digest, c.digest[:]) == 0 { + return errors.New("otr: bad commit MAC in reveal signature message") + } + var rest []byte + c.gx, rest, ok1 = getMPI(c.gxBytes) + if !ok1 || len(rest) > 0 { + return errors.New("otr: gx corrupt after decryption") + } + if c.gx.Cmp(g) < 0 || c.gx.Cmp(pMinus2) > 0 { + return errors.New("otr: DH value out of range") + } + s := 
new(big.Int).Exp(c.gx, c.y, p) + c.calcAKEKeys(s) + + if err := c.processEncryptedSig(encryptedSig, theirMAC, &c.revealKeys, true /* gx comes first */); err != nil { + return errors.New("otr: in reveal signature message: " + err.Error()) + } + + c.theirCurrentDHPub = c.gx + c.theirLastDHPub = nil + + return nil +} + +func (c *Conversation) generateSig() []byte { + c.myKeyId++ + + encryptedSig, mac := c.generateEncryptedSignature(&c.sigKeys, false /* gy comes first */) + + c.myCurrentDHPub = c.gy + c.myCurrentDHPriv = c.y + c.rotateDHKeys() + incCounter(&c.myCounter) + + var ret []byte + ret = appendU16(ret, 2) + ret = append(ret, msgTypeSig) + ret = append(ret, encryptedSig...) + ret = append(ret, mac[:macPrefixBytes]...) + return ret +} + +func (c *Conversation) processSig(in []byte) error { + encryptedSig, in, ok1 := getData(in) + theirMAC := in + if !ok1 || len(theirMAC) != macPrefixBytes { + return errors.New("otr: corrupt signature message") + } + + if err := c.processEncryptedSig(encryptedSig, theirMAC, &c.sigKeys, false /* gy comes first */); err != nil { + return errors.New("otr: in signature message: " + err.Error()) + } + + c.theirCurrentDHPub = c.gy + c.theirLastDHPub = nil + + return nil +} + +func (c *Conversation) rotateDHKeys() { + // evict slots using our retired key id + for i := range c.keySlots { + slot := &c.keySlots[i] + if slot.used && slot.myKeyId == c.myKeyId-1 { + slot.used = false + c.oldMACs = append(c.oldMACs, slot.sendMACKey...) + c.oldMACs = append(c.oldMACs, slot.recvMACKey...) + } + } + + c.myLastDHPriv = c.myCurrentDHPriv + c.myLastDHPub = c.myCurrentDHPub + + var xBytes [dhPrivateBytes]byte + c.myCurrentDHPriv = c.randMPI(xBytes[:]) + c.myCurrentDHPub = new(big.Int).Exp(g, c.myCurrentDHPriv, p) + c.myKeyId++ +} + +func (c *Conversation) processData(in []byte) (out []byte, tlvs []tlv, err error) { + origIn := in + flags, in, ok1 := getU8(in) + theirKeyId, in, ok2 := getU32(in) + myKeyId, in, ok3 := getU32(in) + y, in, ok4 := getMPI(in) + counter, in, ok5 := getNBytes(in, 8) + encrypted, in, ok6 := getData(in) + macedData := origIn[:len(origIn)-len(in)] + theirMAC, in, ok7 := getNBytes(in, macPrefixBytes) + _, in, ok8 := getData(in) + if !ok1 || !ok2 || !ok3 || !ok4 || !ok5 || !ok6 || !ok7 || !ok8 || len(in) > 0 { + err = errors.New("otr: corrupt data message") + return + } + + ignoreErrors := flags&1 != 0 + + slot, err := c.calcDataKeys(myKeyId, theirKeyId) + if err != nil { + if ignoreErrors { + err = nil + } + return + } + + mac := hmac.New(sha1.New, slot.recvMACKey) + mac.Write([]byte{0, 2, 3}) + mac.Write(macedData) + myMAC := mac.Sum(nil) + if len(myMAC) != len(theirMAC) || subtle.ConstantTimeCompare(myMAC, theirMAC) == 0 { + if !ignoreErrors { + err = errors.New("otr: bad MAC on data message") + } + return + } + + if bytes.Compare(counter, slot.theirLastCtr[:]) <= 0 { + err = errors.New("otr: counter regressed") + return + } + copy(slot.theirLastCtr[:], counter) + + var iv [aes.BlockSize]byte + copy(iv[:], counter) + aesCipher, err := aes.NewCipher(slot.recvAESKey) + if err != nil { + panic(err.Error()) + } + ctr := cipher.NewCTR(aesCipher, iv[:]) + ctr.XORKeyStream(encrypted, encrypted) + decrypted := encrypted + + if myKeyId == c.myKeyId { + c.rotateDHKeys() + } + if theirKeyId == c.theirKeyId { + // evict slots using their retired key id + for i := range c.keySlots { + slot := &c.keySlots[i] + if slot.used && slot.theirKeyId == theirKeyId-1 { + slot.used = false + c.oldMACs = append(c.oldMACs, slot.sendMACKey...) 
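+				// Both MAC keys of the retired slot are queued in oldMACs so
+				// they can be revealed in the next outgoing data message, as
+				// the protocol requires.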
+ c.oldMACs = append(c.oldMACs, slot.recvMACKey...) + } + } + + c.theirLastDHPub = c.theirCurrentDHPub + c.theirKeyId++ + c.theirCurrentDHPub = y + } + + if nulPos := bytes.IndexByte(decrypted, 0); nulPos >= 0 { + out = decrypted[:nulPos] + tlvData := decrypted[nulPos+1:] + for len(tlvData) > 0 { + var t tlv + var ok1, ok2, ok3 bool + + t.typ, tlvData, ok1 = getU16(tlvData) + t.length, tlvData, ok2 = getU16(tlvData) + t.data, tlvData, ok3 = getNBytes(tlvData, int(t.length)) + if !ok1 || !ok2 || !ok3 { + err = errors.New("otr: corrupt tlv data") + } + tlvs = append(tlvs, t) + } + } else { + out = decrypted + } + + return +} + +func (c *Conversation) generateData(msg []byte, extra *tlv) []byte { + slot, err := c.calcDataKeys(c.myKeyId-1, c.theirKeyId) + if err != nil { + panic("otr: failed to generate sending keys: " + err.Error()) + } + + var plaintext []byte + plaintext = append(plaintext, msg...) + plaintext = append(plaintext, 0) + + padding := paddingGranularity - ((len(plaintext) + 4) % paddingGranularity) + plaintext = appendU16(plaintext, tlvTypePadding) + plaintext = appendU16(plaintext, uint16(padding)) + for i := 0; i < padding; i++ { + plaintext = append(plaintext, 0) + } + + if extra != nil { + plaintext = appendU16(plaintext, extra.typ) + plaintext = appendU16(plaintext, uint16(len(extra.data))) + plaintext = append(plaintext, extra.data...) + } + + encrypted := make([]byte, len(plaintext)) + + var iv [aes.BlockSize]byte + copy(iv[:], c.myCounter[:]) + aesCipher, err := aes.NewCipher(slot.sendAESKey) + if err != nil { + panic(err.Error()) + } + ctr := cipher.NewCTR(aesCipher, iv[:]) + ctr.XORKeyStream(encrypted, plaintext) + + var ret []byte + ret = appendU16(ret, 2) + ret = append(ret, msgTypeData) + ret = append(ret, 0 /* flags */) + ret = appendU32(ret, c.myKeyId-1) + ret = appendU32(ret, c.theirKeyId) + ret = appendMPI(ret, c.myCurrentDHPub) + ret = append(ret, c.myCounter[:]...) + ret = appendData(ret, encrypted) + + mac := hmac.New(sha1.New, slot.sendMACKey) + mac.Write(ret) + ret = append(ret, mac.Sum(nil)[:macPrefixBytes]...) + ret = appendData(ret, c.oldMACs) + c.oldMACs = nil + incCounter(&c.myCounter) + + return ret +} + +func incCounter(counter *[8]byte) { + for i := 7; i >= 0; i-- { + counter[i]++ + if counter[i] > 0 { + break + } + } +} + +// calcDataKeys computes the keys used to encrypt a data message given the key +// IDs. +func (c *Conversation) calcDataKeys(myKeyId, theirKeyId uint32) (slot *keySlot, err error) { + // Check for a cache hit. + for i := range c.keySlots { + slot = &c.keySlots[i] + if slot.used && slot.theirKeyId == theirKeyId && slot.myKeyId == myKeyId { + return + } + } + + // Find an empty slot to write into. 
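+	// With at most two live key ids per side, the four slots should always
+	// leave one free once retired slots have been evicted.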
+ slot = nil + for i := range c.keySlots { + if !c.keySlots[i].used { + slot = &c.keySlots[i] + break + } + } + if slot == nil { + err = errors.New("otr: internal error: no key slots") + return + } + + var myPriv, myPub, theirPub *big.Int + + if myKeyId == c.myKeyId { + myPriv = c.myCurrentDHPriv + myPub = c.myCurrentDHPub + } else if myKeyId == c.myKeyId-1 { + myPriv = c.myLastDHPriv + myPub = c.myLastDHPub + } else { + err = errors.New("otr: peer requested keyid " + strconv.FormatUint(uint64(myKeyId), 10) + " when I'm on " + strconv.FormatUint(uint64(c.myKeyId), 10)) + return + } + + if theirKeyId == c.theirKeyId { + theirPub = c.theirCurrentDHPub + } else if theirKeyId == c.theirKeyId-1 && c.theirLastDHPub != nil { + theirPub = c.theirLastDHPub + } else { + err = errors.New("otr: peer requested keyid " + strconv.FormatUint(uint64(myKeyId), 10) + " when they're on " + strconv.FormatUint(uint64(c.myKeyId), 10)) + return + } + + var sendPrefixByte, recvPrefixByte [1]byte + + if myPub.Cmp(theirPub) > 0 { + // we're the high end + sendPrefixByte[0], recvPrefixByte[0] = 1, 2 + } else { + // we're the low end + sendPrefixByte[0], recvPrefixByte[0] = 2, 1 + } + + s := new(big.Int).Exp(theirPub, myPriv, p) + sBytes := appendMPI(nil, s) + + h := sha1.New() + h.Write(sendPrefixByte[:]) + h.Write(sBytes) + slot.sendAESKey = h.Sum(slot.sendAESKey[:0])[:16] + + h.Reset() + h.Write(slot.sendAESKey) + slot.sendMACKey = h.Sum(slot.sendMACKey[:0]) + + h.Reset() + h.Write(recvPrefixByte[:]) + h.Write(sBytes) + slot.recvAESKey = h.Sum(slot.recvAESKey[:0])[:16] + + h.Reset() + h.Write(slot.recvAESKey) + slot.recvMACKey = h.Sum(slot.recvMACKey[:0]) + + zero(slot.theirLastCtr[:]) + return +} + +func (c *Conversation) calcAKEKeys(s *big.Int) { + mpi := appendMPI(nil, s) + h := sha256.New() + + var cBytes [32]byte + hashWithPrefix(c.SSID[:], 0, mpi, h) + + hashWithPrefix(cBytes[:], 1, mpi, h) + copy(c.revealKeys.c[:], cBytes[:16]) + copy(c.sigKeys.c[:], cBytes[16:]) + + hashWithPrefix(c.revealKeys.m1[:], 2, mpi, h) + hashWithPrefix(c.revealKeys.m2[:], 3, mpi, h) + hashWithPrefix(c.sigKeys.m1[:], 4, mpi, h) + hashWithPrefix(c.sigKeys.m2[:], 5, mpi, h) +} + +func hashWithPrefix(out []byte, prefix byte, in []byte, h hash.Hash) { + h.Reset() + var p [1]byte + p[0] = prefix + h.Write(p[:]) + h.Write(in) + if len(out) == h.Size() { + h.Sum(out[:0]) + } else { + digest := h.Sum(nil) + copy(out, digest) + } +} + +func (c *Conversation) encode(msg []byte) [][]byte { + b64 := make([]byte, base64.StdEncoding.EncodedLen(len(msg))+len(msgPrefix)+1) + base64.StdEncoding.Encode(b64[len(msgPrefix):], msg) + copy(b64, msgPrefix) + b64[len(b64)-1] = '.' + + if c.FragmentSize < minFragmentSize || len(b64) <= c.FragmentSize { + // We can encode this in a single fragment. + return [][]byte{b64} + } + + // We have to fragment this message. + var ret [][]byte + bytesPerFragment := c.FragmentSize - minFragmentSize + numFragments := (len(b64) + bytesPerFragment) / bytesPerFragment + + for i := 0; i < numFragments; i++ { + frag := []byte("?OTR," + strconv.Itoa(i+1) + "," + strconv.Itoa(numFragments) + ",") + todo := bytesPerFragment + if todo > len(b64) { + todo = len(b64) + } + frag = append(frag, b64[:todo]...) 
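+		// Consume the bytes just copied; the trailing comma appended below
+		// closes this fragment.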
+ b64 = b64[todo:] + frag = append(frag, ',') + ret = append(ret, frag) + } + + return ret +} + +type PublicKey struct { + dsa.PublicKey +} + +func (pk *PublicKey) Parse(in []byte) ([]byte, bool) { + var ok bool + var pubKeyType uint16 + + if pubKeyType, in, ok = getU16(in); !ok || pubKeyType != 0 { + return nil, false + } + if pk.P, in, ok = getMPI(in); !ok { + return nil, false + } + if pk.Q, in, ok = getMPI(in); !ok { + return nil, false + } + if pk.G, in, ok = getMPI(in); !ok { + return nil, false + } + if pk.Y, in, ok = getMPI(in); !ok { + return nil, false + } + + return in, true +} + +func (pk *PublicKey) Serialize(in []byte) []byte { + in = appendU16(in, 0) + in = appendMPI(in, pk.P) + in = appendMPI(in, pk.Q) + in = appendMPI(in, pk.G) + in = appendMPI(in, pk.Y) + return in +} + +// Fingerprint returns the 20-byte, binary fingerprint of the PublicKey. +func (pk *PublicKey) Fingerprint() []byte { + b := pk.Serialize(nil) + h := sha1.New() + h.Write(b[2:]) + return h.Sum(nil) +} + +func (pk *PublicKey) Verify(hashed, sig []byte) ([]byte, bool) { + if len(sig) != 2*dsaSubgroupBytes { + return nil, false + } + r := new(big.Int).SetBytes(sig[:dsaSubgroupBytes]) + s := new(big.Int).SetBytes(sig[dsaSubgroupBytes:]) + ok := dsa.Verify(&pk.PublicKey, hashed, r, s) + return sig[dsaSubgroupBytes*2:], ok +} + +type PrivateKey struct { + PublicKey + dsa.PrivateKey +} + +func (priv *PrivateKey) Sign(rand io.Reader, hashed []byte) []byte { + r, s, err := dsa.Sign(rand, &priv.PrivateKey, hashed) + if err != nil { + panic(err.Error()) + } + rBytes := r.Bytes() + sBytes := s.Bytes() + if len(rBytes) > dsaSubgroupBytes || len(sBytes) > dsaSubgroupBytes { + panic("DSA signature too large") + } + + out := make([]byte, 2*dsaSubgroupBytes) + copy(out[dsaSubgroupBytes-len(rBytes):], rBytes) + copy(out[len(out)-len(sBytes):], sBytes) + return out +} + +func (priv *PrivateKey) Serialize(in []byte) []byte { + in = priv.PublicKey.Serialize(in) + in = appendMPI(in, priv.PrivateKey.X) + return in +} + +func (priv *PrivateKey) Parse(in []byte) ([]byte, bool) { + in, ok := priv.PublicKey.Parse(in) + if !ok { + return in, ok + } + priv.PrivateKey.PublicKey = priv.PublicKey.PublicKey + priv.PrivateKey.X, in, ok = getMPI(in) + return in, ok +} + +func (priv *PrivateKey) Generate(rand io.Reader) { + if err := dsa.GenerateParameters(&priv.PrivateKey.PublicKey.Parameters, rand, dsa.L1024N160); err != nil { + panic(err.Error()) + } + if err := dsa.GenerateKey(&priv.PrivateKey, rand); err != nil { + panic(err.Error()) + } + priv.PublicKey.PublicKey = priv.PrivateKey.PublicKey +} + +func notHex(r rune) bool { + if r >= '0' && r <= '9' || + r >= 'a' && r <= 'f' || + r >= 'A' && r <= 'F' { + return false + } + + return true +} + +// Import parses the contents of a libotr private key file. 
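+// It scans for " #"-prefixed hex MPIs, assumes they appear in the order
+// P, Q, G, Y and X, and finally checks that Y = G^X mod P.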
+func (priv *PrivateKey) Import(in []byte) bool { + mpiStart := []byte(" #") + + mpis := make([]*big.Int, 5) + + for i := 0; i < len(mpis); i++ { + start := bytes.Index(in, mpiStart) + if start == -1 { + return false + } + in = in[start+len(mpiStart):] + end := bytes.IndexFunc(in, notHex) + if end == -1 { + return false + } + hexBytes := in[:end] + in = in[end:] + + if len(hexBytes)&1 != 0 { + return false + } + + mpiBytes := make([]byte, len(hexBytes)/2) + if _, err := hex.Decode(mpiBytes, hexBytes); err != nil { + return false + } + + mpis[i] = new(big.Int).SetBytes(mpiBytes) + } + + priv.PrivateKey.P = mpis[0] + priv.PrivateKey.Q = mpis[1] + priv.PrivateKey.G = mpis[2] + priv.PrivateKey.Y = mpis[3] + priv.PrivateKey.X = mpis[4] + priv.PublicKey.PublicKey = priv.PrivateKey.PublicKey + + a := new(big.Int).Exp(priv.PrivateKey.G, priv.PrivateKey.X, priv.PrivateKey.P) + return a.Cmp(priv.PrivateKey.Y) == 0 +} + +func getU8(in []byte) (uint8, []byte, bool) { + if len(in) < 1 { + return 0, in, false + } + return in[0], in[1:], true +} + +func getU16(in []byte) (uint16, []byte, bool) { + if len(in) < 2 { + return 0, in, false + } + r := uint16(in[0])<<8 | uint16(in[1]) + return r, in[2:], true +} + +func getU32(in []byte) (uint32, []byte, bool) { + if len(in) < 4 { + return 0, in, false + } + r := uint32(in[0])<<24 | uint32(in[1])<<16 | uint32(in[2])<<8 | uint32(in[3]) + return r, in[4:], true +} + +func getMPI(in []byte) (*big.Int, []byte, bool) { + l, in, ok := getU32(in) + if !ok || uint32(len(in)) < l { + return nil, in, false + } + r := new(big.Int).SetBytes(in[:l]) + return r, in[l:], true +} + +func getData(in []byte) ([]byte, []byte, bool) { + l, in, ok := getU32(in) + if !ok || uint32(len(in)) < l { + return nil, in, false + } + return in[:l], in[l:], true +} + +func getNBytes(in []byte, n int) ([]byte, []byte, bool) { + if len(in) < n { + return nil, in, false + } + return in[:n], in[n:], true +} + +func appendU16(out []byte, v uint16) []byte { + out = append(out, byte(v>>8), byte(v)) + return out +} + +func appendU32(out []byte, v uint32) []byte { + out = append(out, byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) + return out +} + +func appendData(out, v []byte) []byte { + out = appendU32(out, uint32(len(v))) + out = append(out, v...) + return out +} + +func appendMPI(out []byte, v *big.Int) []byte { + vBytes := v.Bytes() + out = appendU32(out, uint32(len(vBytes))) + out = append(out, vBytes...) + return out +} + +func appendMPIs(out []byte, mpis ...*big.Int) []byte { + for _, mpi := range mpis { + out = appendMPI(out, mpi) + } + return out +} + +func zero(b []byte) { + for i := range b { + b[i] = 0 + } +} diff --git a/vendor/golang.org/x/crypto/otr/smp.go b/vendor/golang.org/x/crypto/otr/smp.go new file mode 100644 index 00000000..dc6de4ee --- /dev/null +++ b/vendor/golang.org/x/crypto/otr/smp.go @@ -0,0 +1,572 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements the Socialist Millionaires Protocol as described in +// http://www.cypherpunks.ca/otr/Protocol-v2-3.1.0.html. The protocol +// specification is required in order to understand this code and, where +// possible, the variable names in the code match up with the spec. 
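+//
+// At a high level, the SMP lets the two parties check that they hold the
+// same secret without revealing that secret to each other or to an
+// eavesdropper.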
+ +package otr + +import ( + "bytes" + "crypto/sha256" + "errors" + "hash" + "math/big" +) + +type smpFailure string + +func (s smpFailure) Error() string { + return string(s) +} + +var smpFailureError = smpFailure("otr: SMP protocol failed") +var smpSecretMissingError = smpFailure("otr: mutual secret needed") + +const smpVersion = 1 + +const ( + smpState1 = iota + smpState2 + smpState3 + smpState4 +) + +type smpState struct { + state int + a2, a3, b2, b3, pb, qb *big.Int + g2a, g3a *big.Int + g2, g3 *big.Int + g3b, papb, qaqb, ra *big.Int + saved *tlv + secret *big.Int + question string +} + +func (c *Conversation) startSMP(question string) (tlvs []tlv) { + if c.smp.state != smpState1 { + tlvs = append(tlvs, c.generateSMPAbort()) + } + tlvs = append(tlvs, c.generateSMP1(question)) + c.smp.question = "" + c.smp.state = smpState2 + return +} + +func (c *Conversation) resetSMP() { + c.smp.state = smpState1 + c.smp.secret = nil + c.smp.question = "" +} + +func (c *Conversation) processSMP(in tlv) (out tlv, complete bool, err error) { + data := in.data + + switch in.typ { + case tlvTypeSMPAbort: + if c.smp.state != smpState1 { + err = smpFailureError + } + c.resetSMP() + return + case tlvTypeSMP1WithQuestion: + // We preprocess this into a SMP1 message. + nulPos := bytes.IndexByte(data, 0) + if nulPos == -1 { + err = errors.New("otr: SMP message with question didn't contain a NUL byte") + return + } + c.smp.question = string(data[:nulPos]) + data = data[nulPos+1:] + } + + numMPIs, data, ok := getU32(data) + if !ok || numMPIs > 20 { + err = errors.New("otr: corrupt SMP message") + return + } + + mpis := make([]*big.Int, numMPIs) + for i := range mpis { + var ok bool + mpis[i], data, ok = getMPI(data) + if !ok { + err = errors.New("otr: corrupt SMP message") + return + } + } + + switch in.typ { + case tlvTypeSMP1, tlvTypeSMP1WithQuestion: + if c.smp.state != smpState1 { + c.resetSMP() + out = c.generateSMPAbort() + return + } + if c.smp.secret == nil { + err = smpSecretMissingError + return + } + if err = c.processSMP1(mpis); err != nil { + return + } + c.smp.state = smpState3 + out = c.generateSMP2() + case tlvTypeSMP2: + if c.smp.state != smpState2 { + c.resetSMP() + out = c.generateSMPAbort() + return + } + if out, err = c.processSMP2(mpis); err != nil { + out = c.generateSMPAbort() + return + } + c.smp.state = smpState4 + case tlvTypeSMP3: + if c.smp.state != smpState3 { + c.resetSMP() + out = c.generateSMPAbort() + return + } + if out, err = c.processSMP3(mpis); err != nil { + return + } + c.smp.state = smpState1 + c.smp.secret = nil + complete = true + case tlvTypeSMP4: + if c.smp.state != smpState4 { + c.resetSMP() + out = c.generateSMPAbort() + return + } + if err = c.processSMP4(mpis); err != nil { + out = c.generateSMPAbort() + return + } + c.smp.state = smpState1 + c.smp.secret = nil + complete = true + default: + panic("unknown SMP message") + } + + return +} + +func (c *Conversation) calcSMPSecret(mutualSecret []byte, weStarted bool) { + h := sha256.New() + h.Write([]byte{smpVersion}) + if weStarted { + h.Write(c.PrivateKey.PublicKey.Fingerprint()) + h.Write(c.TheirPublicKey.Fingerprint()) + } else { + h.Write(c.TheirPublicKey.Fingerprint()) + h.Write(c.PrivateKey.PublicKey.Fingerprint()) + } + h.Write(c.SSID[:]) + h.Write(mutualSecret) + c.smp.secret = new(big.Int).SetBytes(h.Sum(nil)) +} + +func (c *Conversation) generateSMP1(question string) tlv { + var randBuf [16]byte + c.smp.a2 = c.randMPI(randBuf[:]) + c.smp.a3 = c.randMPI(randBuf[:]) + g2a := new(big.Int).Exp(g, c.smp.a2, p) 
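+	// g2a = g^a2 and g3a = g^a3 are the exponentials sent in SMP message 1,
+	// each proved in zero knowledge via the (c2, d2) and (c3, d3) pairs
+	// computed below.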
+ g3a := new(big.Int).Exp(g, c.smp.a3, p) + h := sha256.New() + + r2 := c.randMPI(randBuf[:]) + r := new(big.Int).Exp(g, r2, p) + c2 := new(big.Int).SetBytes(hashMPIs(h, 1, r)) + d2 := new(big.Int).Mul(c.smp.a2, c2) + d2.Sub(r2, d2) + d2.Mod(d2, q) + if d2.Sign() < 0 { + d2.Add(d2, q) + } + + r3 := c.randMPI(randBuf[:]) + r.Exp(g, r3, p) + c3 := new(big.Int).SetBytes(hashMPIs(h, 2, r)) + d3 := new(big.Int).Mul(c.smp.a3, c3) + d3.Sub(r3, d3) + d3.Mod(d3, q) + if d3.Sign() < 0 { + d3.Add(d3, q) + } + + var ret tlv + if len(question) > 0 { + ret.typ = tlvTypeSMP1WithQuestion + ret.data = append(ret.data, question...) + ret.data = append(ret.data, 0) + } else { + ret.typ = tlvTypeSMP1 + } + ret.data = appendU32(ret.data, 6) + ret.data = appendMPIs(ret.data, g2a, c2, d2, g3a, c3, d3) + return ret +} + +func (c *Conversation) processSMP1(mpis []*big.Int) error { + if len(mpis) != 6 { + return errors.New("otr: incorrect number of arguments in SMP1 message") + } + g2a := mpis[0] + c2 := mpis[1] + d2 := mpis[2] + g3a := mpis[3] + c3 := mpis[4] + d3 := mpis[5] + h := sha256.New() + + r := new(big.Int).Exp(g, d2, p) + s := new(big.Int).Exp(g2a, c2, p) + r.Mul(r, s) + r.Mod(r, p) + t := new(big.Int).SetBytes(hashMPIs(h, 1, r)) + if c2.Cmp(t) != 0 { + return errors.New("otr: ZKP c2 incorrect in SMP1 message") + } + r.Exp(g, d3, p) + s.Exp(g3a, c3, p) + r.Mul(r, s) + r.Mod(r, p) + t.SetBytes(hashMPIs(h, 2, r)) + if c3.Cmp(t) != 0 { + return errors.New("otr: ZKP c3 incorrect in SMP1 message") + } + + c.smp.g2a = g2a + c.smp.g3a = g3a + return nil +} + +func (c *Conversation) generateSMP2() tlv { + var randBuf [16]byte + b2 := c.randMPI(randBuf[:]) + c.smp.b3 = c.randMPI(randBuf[:]) + r2 := c.randMPI(randBuf[:]) + r3 := c.randMPI(randBuf[:]) + r4 := c.randMPI(randBuf[:]) + r5 := c.randMPI(randBuf[:]) + r6 := c.randMPI(randBuf[:]) + + g2b := new(big.Int).Exp(g, b2, p) + g3b := new(big.Int).Exp(g, c.smp.b3, p) + + r := new(big.Int).Exp(g, r2, p) + h := sha256.New() + c2 := new(big.Int).SetBytes(hashMPIs(h, 3, r)) + d2 := new(big.Int).Mul(b2, c2) + d2.Sub(r2, d2) + d2.Mod(d2, q) + if d2.Sign() < 0 { + d2.Add(d2, q) + } + + r.Exp(g, r3, p) + c3 := new(big.Int).SetBytes(hashMPIs(h, 4, r)) + d3 := new(big.Int).Mul(c.smp.b3, c3) + d3.Sub(r3, d3) + d3.Mod(d3, q) + if d3.Sign() < 0 { + d3.Add(d3, q) + } + + c.smp.g2 = new(big.Int).Exp(c.smp.g2a, b2, p) + c.smp.g3 = new(big.Int).Exp(c.smp.g3a, c.smp.b3, p) + c.smp.pb = new(big.Int).Exp(c.smp.g3, r4, p) + c.smp.qb = new(big.Int).Exp(g, r4, p) + r.Exp(c.smp.g2, c.smp.secret, p) + c.smp.qb.Mul(c.smp.qb, r) + c.smp.qb.Mod(c.smp.qb, p) + + s := new(big.Int) + s.Exp(c.smp.g2, r6, p) + r.Exp(g, r5, p) + s.Mul(r, s) + s.Mod(s, p) + r.Exp(c.smp.g3, r5, p) + cp := new(big.Int).SetBytes(hashMPIs(h, 5, r, s)) + + // D5 = r5 - r4 cP mod q and D6 = r6 - y cP mod q + + s.Mul(r4, cp) + r.Sub(r5, s) + d5 := new(big.Int).Mod(r, q) + if d5.Sign() < 0 { + d5.Add(d5, q) + } + + s.Mul(c.smp.secret, cp) + r.Sub(r6, s) + d6 := new(big.Int).Mod(r, q) + if d6.Sign() < 0 { + d6.Add(d6, q) + } + + var ret tlv + ret.typ = tlvTypeSMP2 + ret.data = appendU32(ret.data, 11) + ret.data = appendMPIs(ret.data, g2b, c2, d2, g3b, c3, d3, c.smp.pb, c.smp.qb, cp, d5, d6) + return ret +} + +func (c *Conversation) processSMP2(mpis []*big.Int) (out tlv, err error) { + if len(mpis) != 11 { + err = errors.New("otr: incorrect number of arguments in SMP2 message") + return + } + g2b := mpis[0] + c2 := mpis[1] + d2 := mpis[2] + g3b := mpis[3] + c3 := mpis[4] + d3 := mpis[5] + pb := mpis[6] + qb := mpis[7] + cp 
:= mpis[8] + d5 := mpis[9] + d6 := mpis[10] + h := sha256.New() + + r := new(big.Int).Exp(g, d2, p) + s := new(big.Int).Exp(g2b, c2, p) + r.Mul(r, s) + r.Mod(r, p) + s.SetBytes(hashMPIs(h, 3, r)) + if c2.Cmp(s) != 0 { + err = errors.New("otr: ZKP c2 failed in SMP2 message") + return + } + + r.Exp(g, d3, p) + s.Exp(g3b, c3, p) + r.Mul(r, s) + r.Mod(r, p) + s.SetBytes(hashMPIs(h, 4, r)) + if c3.Cmp(s) != 0 { + err = errors.New("otr: ZKP c3 failed in SMP2 message") + return + } + + c.smp.g2 = new(big.Int).Exp(g2b, c.smp.a2, p) + c.smp.g3 = new(big.Int).Exp(g3b, c.smp.a3, p) + + r.Exp(g, d5, p) + s.Exp(c.smp.g2, d6, p) + r.Mul(r, s) + s.Exp(qb, cp, p) + r.Mul(r, s) + r.Mod(r, p) + + s.Exp(c.smp.g3, d5, p) + t := new(big.Int).Exp(pb, cp, p) + s.Mul(s, t) + s.Mod(s, p) + t.SetBytes(hashMPIs(h, 5, s, r)) + if cp.Cmp(t) != 0 { + err = errors.New("otr: ZKP cP failed in SMP2 message") + return + } + + var randBuf [16]byte + r4 := c.randMPI(randBuf[:]) + r5 := c.randMPI(randBuf[:]) + r6 := c.randMPI(randBuf[:]) + r7 := c.randMPI(randBuf[:]) + + pa := new(big.Int).Exp(c.smp.g3, r4, p) + r.Exp(c.smp.g2, c.smp.secret, p) + qa := new(big.Int).Exp(g, r4, p) + qa.Mul(qa, r) + qa.Mod(qa, p) + + r.Exp(g, r5, p) + s.Exp(c.smp.g2, r6, p) + r.Mul(r, s) + r.Mod(r, p) + + s.Exp(c.smp.g3, r5, p) + cp.SetBytes(hashMPIs(h, 6, s, r)) + + r.Mul(r4, cp) + d5 = new(big.Int).Sub(r5, r) + d5.Mod(d5, q) + if d5.Sign() < 0 { + d5.Add(d5, q) + } + + r.Mul(c.smp.secret, cp) + d6 = new(big.Int).Sub(r6, r) + d6.Mod(d6, q) + if d6.Sign() < 0 { + d6.Add(d6, q) + } + + r.ModInverse(qb, p) + qaqb := new(big.Int).Mul(qa, r) + qaqb.Mod(qaqb, p) + + ra := new(big.Int).Exp(qaqb, c.smp.a3, p) + r.Exp(qaqb, r7, p) + s.Exp(g, r7, p) + cr := new(big.Int).SetBytes(hashMPIs(h, 7, s, r)) + + r.Mul(c.smp.a3, cr) + d7 := new(big.Int).Sub(r7, r) + d7.Mod(d7, q) + if d7.Sign() < 0 { + d7.Add(d7, q) + } + + c.smp.g3b = g3b + c.smp.qaqb = qaqb + + r.ModInverse(pb, p) + c.smp.papb = new(big.Int).Mul(pa, r) + c.smp.papb.Mod(c.smp.papb, p) + c.smp.ra = ra + + out.typ = tlvTypeSMP3 + out.data = appendU32(out.data, 8) + out.data = appendMPIs(out.data, pa, qa, cp, d5, d6, ra, cr, d7) + return +} + +func (c *Conversation) processSMP3(mpis []*big.Int) (out tlv, err error) { + if len(mpis) != 8 { + err = errors.New("otr: incorrect number of arguments in SMP3 message") + return + } + pa := mpis[0] + qa := mpis[1] + cp := mpis[2] + d5 := mpis[3] + d6 := mpis[4] + ra := mpis[5] + cr := mpis[6] + d7 := mpis[7] + h := sha256.New() + + r := new(big.Int).Exp(g, d5, p) + s := new(big.Int).Exp(c.smp.g2, d6, p) + r.Mul(r, s) + s.Exp(qa, cp, p) + r.Mul(r, s) + r.Mod(r, p) + + s.Exp(c.smp.g3, d5, p) + t := new(big.Int).Exp(pa, cp, p) + s.Mul(s, t) + s.Mod(s, p) + t.SetBytes(hashMPIs(h, 6, s, r)) + if t.Cmp(cp) != 0 { + err = errors.New("otr: ZKP cP failed in SMP3 message") + return + } + + r.ModInverse(c.smp.qb, p) + qaqb := new(big.Int).Mul(qa, r) + qaqb.Mod(qaqb, p) + + r.Exp(qaqb, d7, p) + s.Exp(ra, cr, p) + r.Mul(r, s) + r.Mod(r, p) + + s.Exp(g, d7, p) + t.Exp(c.smp.g3a, cr, p) + s.Mul(s, t) + s.Mod(s, p) + t.SetBytes(hashMPIs(h, 7, s, r)) + if t.Cmp(cr) != 0 { + err = errors.New("otr: ZKP cR failed in SMP3 message") + return + } + + var randBuf [16]byte + r7 := c.randMPI(randBuf[:]) + rb := new(big.Int).Exp(qaqb, c.smp.b3, p) + + r.Exp(qaqb, r7, p) + s.Exp(g, r7, p) + cr = new(big.Int).SetBytes(hashMPIs(h, 8, s, r)) + + r.Mul(c.smp.b3, cr) + d7 = new(big.Int).Sub(r7, r) + d7.Mod(d7, q) + if d7.Sign() < 0 { + d7.Add(d7, q) + } + + out.typ = tlvTypeSMP4 + out.data = 
appendU32(out.data, 3) + out.data = appendMPIs(out.data, rb, cr, d7) + + r.ModInverse(c.smp.pb, p) + r.Mul(pa, r) + r.Mod(r, p) + s.Exp(ra, c.smp.b3, p) + if r.Cmp(s) != 0 { + err = smpFailureError + } + + return +} + +func (c *Conversation) processSMP4(mpis []*big.Int) error { + if len(mpis) != 3 { + return errors.New("otr: incorrect number of arguments in SMP4 message") + } + rb := mpis[0] + cr := mpis[1] + d7 := mpis[2] + h := sha256.New() + + r := new(big.Int).Exp(c.smp.qaqb, d7, p) + s := new(big.Int).Exp(rb, cr, p) + r.Mul(r, s) + r.Mod(r, p) + + s.Exp(g, d7, p) + t := new(big.Int).Exp(c.smp.g3b, cr, p) + s.Mul(s, t) + s.Mod(s, p) + t.SetBytes(hashMPIs(h, 8, s, r)) + if t.Cmp(cr) != 0 { + return errors.New("otr: ZKP cR failed in SMP4 message") + } + + r.Exp(rb, c.smp.a3, p) + if r.Cmp(c.smp.papb) != 0 { + return smpFailureError + } + + return nil +} + +func (c *Conversation) generateSMPAbort() tlv { + return tlv{typ: tlvTypeSMPAbort} +} + +func hashMPIs(h hash.Hash, magic byte, mpis ...*big.Int) []byte { + if h != nil { + h.Reset() + } else { + h = sha256.New() + } + + h.Write([]byte{magic}) + for _, mpi := range mpis { + h.Write(appendMPI(nil, mpi)) + } + return h.Sum(nil) +} From 9986e8ca7c41dbf3364b962f163a142be8b16841 Mon Sep 17 00:00:00 2001 From: Mike Brown Date: Thu, 17 Nov 2016 12:28:05 -0600 Subject: [PATCH 100/276] adds support for oci manifests and manifestlists Signed-off-by: Mike Brown --- manifest/manifestlist/manifestlist.go | 28 +- manifest/manifestlist/manifestlist_test.go | 103 ++++++- manifest/ocischema/builder.go | 80 ++++++ manifest/ocischema/builder_test.go | 210 ++++++++++++++ manifest/ocischema/manifest.go | 133 +++++++++ manifest/ocischema/manifest_test.go | 111 ++++++++ registry/handlers/api_test.go | 30 +- registry/handlers/manifests.go | 316 ++++++++++++++++++--- registry/storage/driver/storagedriver.go | 2 +- registry/storage/manifeststore.go | 6 +- 10 files changed, 957 insertions(+), 62 deletions(-) create mode 100644 manifest/ocischema/builder.go create mode 100644 manifest/ocischema/builder_test.go create mode 100644 manifest/ocischema/manifest.go create mode 100644 manifest/ocischema/manifest_test.go diff --git a/manifest/manifestlist/manifestlist.go b/manifest/manifestlist/manifestlist.go index 3aa0662d..56f6aa61 100644 --- a/manifest/manifestlist/manifestlist.go +++ b/manifest/manifestlist/manifestlist.go @@ -7,11 +7,17 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/ocischema" "github.com/opencontainers/go-digest" ) -// MediaTypeManifestList specifies the mediaType for manifest lists. -const MediaTypeManifestList = "application/vnd.docker.distribution.manifest.list.v2+json" +const ( + // MediaTypeManifestList specifies the mediaType for manifest lists. + MediaTypeManifestList = "application/vnd.docker.distribution.manifest.list.v2+json" + // MediaTypeOCIManifestList specifies the mediaType for OCI compliant manifest + // lists. + MediaTypeOCIManifestList = "application/vnd.oci.image.manifest.list.v1+json" +) // SchemaVersion provides a pre-initialized version structure for this // packages version of the manifest. @@ -20,6 +26,13 @@ var SchemaVersion = manifest.Versioned{ MediaType: MediaTypeManifestList, } +// OCISchemaVersion provides a pre-initialized version structure for this +// packages OCIschema version of the manifest. 
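+// FromDescriptors selects this version automatically when the first
+// descriptor it is given is an OCI manifest.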
+var OCISchemaVersion = manifest.Versioned{ + SchemaVersion: 2, + MediaType: MediaTypeOCIManifestList, +} + func init() { manifestListFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { m := new(DeserializedManifestList) @@ -105,8 +118,15 @@ type DeserializedManifestList struct { // DeserializedManifestList which contains the resulting manifest list // and its JSON representation. func FromDescriptors(descriptors []ManifestDescriptor) (*DeserializedManifestList, error) { - m := ManifestList{ - Versioned: SchemaVersion, + var m ManifestList + if len(descriptors) > 0 && descriptors[0].Descriptor.MediaType == ocischema.MediaTypeManifest { + m = ManifestList{ + Versioned: OCISchemaVersion, + } + } else { + m = ManifestList{ + Versioned: SchemaVersion, + } } m.Manifests = make([]ManifestDescriptor, len(descriptors), len(descriptors)) diff --git a/manifest/manifestlist/manifestlist_test.go b/manifest/manifestlist/manifestlist_test.go index 09e6ed1f..1d24aef2 100644 --- a/manifest/manifestlist/manifestlist_test.go +++ b/manifest/manifestlist/manifestlist_test.go @@ -69,7 +69,7 @@ func TestManifestList(t *testing.T) { t.Fatalf("error creating DeserializedManifestList: %v", err) } - mediaType, canonical, err := deserialized.Payload() + mediaType, canonical, _ := deserialized.Payload() if mediaType != MediaTypeManifestList { t.Fatalf("unexpected media type: %s", mediaType) @@ -109,3 +109,104 @@ func TestManifestList(t *testing.T) { } } } + +var expectedOCIManifestListSerialization = []byte(`{ + "schemaVersion": 2, + "mediaType": "application/vnd.oci.image.manifest.list.v1+json", + "manifests": [ + { + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "size": 985, + "digest": "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", + "platform": { + "architecture": "amd64", + "os": "linux", + "features": [ + "sse4" + ] + } + }, + { + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "size": 2392, + "digest": "sha256:6346340964309634683409684360934680934608934608934608934068934608", + "platform": { + "architecture": "sun4m", + "os": "sunos" + } + } + ] +}`) + +func TestOCIManifestList(t *testing.T) { + manifestDescriptors := []ManifestDescriptor{ + { + Descriptor: distribution.Descriptor{ + Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", + Size: 985, + MediaType: "application/vnd.oci.image.manifest.v1+json", + }, + Platform: PlatformSpec{ + Architecture: "amd64", + OS: "linux", + Features: []string{"sse4"}, + }, + }, + { + Descriptor: distribution.Descriptor{ + Digest: "sha256:6346340964309634683409684360934680934608934608934608934068934608", + Size: 2392, + MediaType: "application/vnd.oci.image.manifest.v1+json", + }, + Platform: PlatformSpec{ + Architecture: "sun4m", + OS: "sunos", + }, + }, + } + + deserialized, err := FromDescriptors(manifestDescriptors) + if err != nil { + t.Fatalf("error creating DeserializedManifestList: %v", err) + } + + mediaType, canonical, _ := deserialized.Payload() + + if mediaType != MediaTypeOCIManifestList { + t.Fatalf("unexpected media type: %s", mediaType) + } + + // Check that the canonical field is the same as json.MarshalIndent + // with these parameters. 
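+	// The canonical bytes must be reproducible because the manifest list
+	// digest is computed over them.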
+ p, err := json.MarshalIndent(&deserialized.ManifestList, "", " ") + if err != nil { + t.Fatalf("error marshaling manifest list: %v", err) + } + if !bytes.Equal(p, canonical) { + t.Fatalf("manifest bytes not equal: %q != %q", string(canonical), string(p)) + } + + // Check that the canonical field has the expected value. + if !bytes.Equal(expectedOCIManifestListSerialization, canonical) { + t.Fatalf("manifest bytes not equal: %q != %q", string(canonical), string(expectedOCIManifestListSerialization)) + } + + var unmarshalled DeserializedManifestList + if err := json.Unmarshal(deserialized.canonical, &unmarshalled); err != nil { + t.Fatalf("error unmarshaling manifest: %v", err) + } + + if !reflect.DeepEqual(&unmarshalled, deserialized) { + t.Fatalf("manifests are different after unmarshaling: %v != %v", unmarshalled, *deserialized) + } + + references := deserialized.References() + if len(references) != 2 { + t.Fatalf("unexpected number of references: %d", len(references)) + } + for i := range references { + if !reflect.DeepEqual(references[i], manifestDescriptors[i].Descriptor) { + t.Fatalf("unexpected value %d returned by References: %v", i, references[i]) + } + } +} diff --git a/manifest/ocischema/builder.go b/manifest/ocischema/builder.go new file mode 100644 index 00000000..5318c139 --- /dev/null +++ b/manifest/ocischema/builder.go @@ -0,0 +1,80 @@ +package ocischema + +import ( + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/opencontainers/go-digest" +) + +// builder is a type for constructing manifests. +type builder struct { + // bs is a BlobService used to publish the configuration blob. + bs distribution.BlobService + + // configJSON references + configJSON []byte + + // layers is a list of layer descriptors that gets built by successive + // calls to AppendReference. + layers []distribution.Descriptor +} + +// NewManifestBuilder is used to build new manifests for the current schema +// version. It takes a BlobService so it can publish the configuration blob +// as part of the Build process. +func NewManifestBuilder(bs distribution.BlobService, configJSON []byte) distribution.ManifestBuilder { + mb := &builder{ + bs: bs, + configJSON: make([]byte, len(configJSON)), + } + copy(mb.configJSON, configJSON) + + return mb +} + +// Build produces a final manifest from the given references. +func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) { + m := Manifest{ + Versioned: SchemaVersion, + Layers: make([]distribution.Descriptor, len(mb.layers)), + } + copy(m.Layers, mb.layers) + + configDigest := digest.FromBytes(mb.configJSON) + + var err error + m.Config, err = mb.bs.Stat(ctx, configDigest) + switch err { + case nil: + // Override MediaType, since Put always replaces the specified media + // type with application/octet-stream in the descriptor it returns. + m.Config.MediaType = MediaTypeConfig + return FromStruct(m) + case distribution.ErrBlobUnknown: + // nop + default: + return nil, err + } + + // Add config to the blob store + m.Config, err = mb.bs.Put(ctx, MediaTypeConfig, mb.configJSON) + // Override MediaType, since Put always replaces the specified media + // type with application/octet-stream in the descriptor it returns. + m.Config.MediaType = MediaTypeConfig + if err != nil { + return nil, err + } + + return FromStruct(m) +} + +// AppendReference adds a reference to the current ManifestBuilder. 
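+// Only layer descriptors are expected here; the image configuration is
+// supplied to NewManifestBuilder instead.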
+func (mb *builder) AppendReference(d distribution.Describable) error { + mb.layers = append(mb.layers, d.Descriptor()) + return nil +} + +// References returns the current references added to this builder. +func (mb *builder) References() []distribution.Descriptor { + return mb.layers +} diff --git a/manifest/ocischema/builder_test.go b/manifest/ocischema/builder_test.go new file mode 100644 index 00000000..7192ac59 --- /dev/null +++ b/manifest/ocischema/builder_test.go @@ -0,0 +1,210 @@ +package ocischema + +import ( + "reflect" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/opencontainers/go-digest" +) + +type mockBlobService struct { + descriptors map[digest.Digest]distribution.Descriptor +} + +func (bs *mockBlobService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + if descriptor, ok := bs.descriptors[dgst]; ok { + return descriptor, nil + } + return distribution.Descriptor{}, distribution.ErrBlobUnknown +} + +func (bs *mockBlobService) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + panic("not implemented") +} + +func (bs *mockBlobService) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + panic("not implemented") +} + +func (bs *mockBlobService) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + d := distribution.Descriptor{ + Digest: digest.FromBytes(p), + Size: int64(len(p)), + MediaType: "application/octet-stream", + } + bs.descriptors[d.Digest] = d + return d, nil +} + +func (bs *mockBlobService) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + panic("not implemented") +} + +func (bs *mockBlobService) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + panic("not implemented") +} + +func TestBuilder(t *testing.T) { + imgJSON := []byte(`{ + "architecture": "amd64", + "config": { + "AttachStderr": false, + "AttachStdin": false, + "AttachStdout": false, + "Cmd": [ + "/bin/sh", + "-c", + "echo hi" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "derived=true", + "asdf=true" + ], + "Hostname": "23304fc829f9", + "Image": "sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246", + "Labels": {}, + "OnBuild": [], + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": null, + "WorkingDir": "" + }, + "container": "e91032eb0403a61bfe085ff5a5a48e3659e5a6deae9f4d678daa2ae399d5a001", + "container_config": { + "AttachStderr": false, + "AttachStdin": false, + "AttachStdout": false, + "Cmd": [ + "/bin/sh", + "-c", + "#(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "derived=true", + "asdf=true" + ], + "Hostname": "23304fc829f9", + "Image": "sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246", + "Labels": {}, + "OnBuild": [], + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": null, + "WorkingDir": "" + }, + "created": "2015-11-04T23:06:32.365666163Z", + "docker_version": "1.9.0-dev", + "history": [ + { + "created": "2015-10-31T22:22:54.690851953Z", + "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /" + }, + { + "created": "2015-10-31T22:22:55.613815829Z", + 
"created_by": "/bin/sh -c #(nop) CMD [\"sh\"]" + }, + { + "created": "2015-11-04T23:06:30.934316144Z", + "created_by": "/bin/sh -c #(nop) ENV derived=true", + "empty_layer": true + }, + { + "created": "2015-11-04T23:06:31.192097572Z", + "created_by": "/bin/sh -c #(nop) ENV asdf=true", + "empty_layer": true + }, + { + "created": "2015-11-04T23:06:32.083868454Z", + "created_by": "/bin/sh -c dd if=/dev/zero of=/file bs=1024 count=1024" + }, + { + "created": "2015-11-04T23:06:32.365666163Z", + "created_by": "/bin/sh -c #(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]", + "empty_layer": true + } + ], + "os": "linux", + "rootfs": { + "diff_ids": [ + "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1", + "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef", + "sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49" + ], + "type": "layers" + } +}`) + configDigest := digest.FromBytes(imgJSON) + + descriptors := []distribution.Descriptor{ + { + Digest: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"), + Size: 5312, + MediaType: MediaTypeLayer, + }, + { + Digest: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"), + Size: 235231, + MediaType: MediaTypeLayer, + }, + { + Digest: digest.Digest("sha256:b4ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"), + Size: 639152, + MediaType: MediaTypeLayer, + }, + } + + bs := &mockBlobService{descriptors: make(map[digest.Digest]distribution.Descriptor)} + builder := NewManifestBuilder(bs, imgJSON) + + for _, d := range descriptors { + if err := builder.AppendReference(d); err != nil { + t.Fatalf("AppendReference returned error: %v", err) + } + } + + built, err := builder.Build(context.Background()) + if err != nil { + t.Fatalf("Build returned error: %v", err) + } + + // Check that the config was put in the blob store + _, err = bs.Stat(context.Background(), configDigest) + if err != nil { + t.Fatal("config was not put in the blob store") + } + + manifest := built.(*DeserializedManifest).Manifest + + if manifest.Versioned.SchemaVersion != 2 { + t.Fatal("SchemaVersion != 2") + } + + target := manifest.Target() + if target.Digest != configDigest { + t.Fatalf("unexpected digest in target: %s", target.Digest.String()) + } + if target.MediaType != MediaTypeConfig { + t.Fatalf("unexpected media type in target: %s", target.MediaType) + } + if target.Size != 3153 { + t.Fatalf("unexpected size in target: %d", target.Size) + } + + references := manifest.References() + expected := append([]distribution.Descriptor{manifest.Target()}, descriptors...) + if !reflect.DeepEqual(references, expected) { + t.Fatal("References() does not match the descriptors added") + } +} diff --git a/manifest/ocischema/manifest.go b/manifest/ocischema/manifest.go new file mode 100644 index 00000000..52f64678 --- /dev/null +++ b/manifest/ocischema/manifest.go @@ -0,0 +1,133 @@ +package ocischema + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/manifest" + "github.com/opencontainers/go-digest" +) + +const ( + // MediaTypeManifest specifies the mediaType for the current version. + MediaTypeManifest = "application/vnd.oci.image.manifest.v1+json" + + // MediaTypeConfig specifies the mediaType for the image configuration. + MediaTypeConfig = "application/vnd.oci.image.config.v1+json" + + // MediaTypePluginConfig specifies the mediaType for plugin configuration. 
+ MediaTypePluginConfig = "application/vnd.docker.plugin.v1+json" + + // MediaTypeLayer is the mediaType used for layers referenced by the manifest. + MediaTypeLayer = "application/vnd.oci.image.layer.v1.tar+gzip" + + // MediaTypeForeignLayer is the mediaType used for layers that must be + // downloaded from foreign URLs. + MediaTypeForeignLayer = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" +) + +var ( + // SchemaVersion provides a pre-initialized version structure for this + // packages version of the manifest. + SchemaVersion = manifest.Versioned{ + SchemaVersion: 2, // Mike: todo this could confusing cause oci version 1 is closer to docker 2 than 1 + MediaType: MediaTypeManifest, + } +) + +func init() { + ocischemaFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { + m := new(DeserializedManifest) + err := m.UnmarshalJSON(b) + if err != nil { + return nil, distribution.Descriptor{}, err + } + + dgst := digest.FromBytes(b) + return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifest}, err + } + err := distribution.RegisterManifestSchema(MediaTypeManifest, ocischemaFunc) + if err != nil { + panic(fmt.Sprintf("Unable to register manifest: %s", err)) + } +} + +// Manifest defines a schema2 manifest. +type Manifest struct { + manifest.Versioned + + // Config references the image configuration as a blob. + Config distribution.Descriptor `json:"config"` + + // Layers lists descriptors for the layers referenced by the + // configuration. + Layers []distribution.Descriptor `json:"layers"` +} + +// References returnes the descriptors of this manifests references. +func (m Manifest) References() []distribution.Descriptor { + references := make([]distribution.Descriptor, 0, 1+len(m.Layers)) + references = append(references, m.Config) + references = append(references, m.Layers...) + return references +} + +// Target returns the target of this signed manifest. +func (m Manifest) Target() distribution.Descriptor { + return m.Config +} + +// DeserializedManifest wraps Manifest with a copy of the original JSON. +// It satisfies the distribution.Manifest interface. +type DeserializedManifest struct { + Manifest + + // canonical is the canonical byte representation of the Manifest. + canonical []byte +} + +// FromStruct takes a Manifest structure, marshals it to JSON, and returns a +// DeserializedManifest which contains the manifest and its JSON representation. +func FromStruct(m Manifest) (*DeserializedManifest, error) { + var deserialized DeserializedManifest + deserialized.Manifest = m + + var err error + deserialized.canonical, err = json.MarshalIndent(&m, "", " ") + return &deserialized, err +} + +// UnmarshalJSON populates a new Manifest struct from JSON data. +func (m *DeserializedManifest) UnmarshalJSON(b []byte) error { + m.canonical = make([]byte, len(b), len(b)) + // store manifest in canonical + copy(m.canonical, b) + + // Unmarshal canonical JSON into Manifest object + var manifest Manifest + if err := json.Unmarshal(m.canonical, &manifest); err != nil { + return err + } + + m.Manifest = manifest + + return nil +} + +// MarshalJSON returns the contents of canonical. If canonical is empty, +// marshals the inner contents. +func (m *DeserializedManifest) MarshalJSON() ([]byte, error) { + if len(m.canonical) > 0 { + return m.canonical, nil + } + + return nil, errors.New("JSON representation not initialized in DeserializedManifest") +} + +// Payload returns the raw content of the manifest. 
The contents can be used to +// calculate the content identifier. +func (m DeserializedManifest) Payload() (string, []byte, error) { + return m.MediaType, m.canonical, nil +} diff --git a/manifest/ocischema/manifest_test.go b/manifest/ocischema/manifest_test.go new file mode 100644 index 00000000..9f439dcd --- /dev/null +++ b/manifest/ocischema/manifest_test.go @@ -0,0 +1,111 @@ +package ocischema + +import ( + "bytes" + "encoding/json" + "reflect" + "testing" + + "github.com/docker/distribution" +) + +var expectedManifestSerialization = []byte(`{ + "schemaVersion": 2, + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "config": { + "mediaType": "application/vnd.oci.image.config.v1+json", + "size": 985, + "digest": "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b" + }, + "layers": [ + { + "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", + "size": 153263, + "digest": "sha256:62d8908bee94c202b2d35224a221aaa2058318bfa9879fa541efaecba272331b" + } + ] +}`) + +func TestManifest(t *testing.T) { + manifest := Manifest{ + Versioned: SchemaVersion, + Config: distribution.Descriptor{ + Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", + Size: 985, + MediaType: MediaTypeConfig, + }, + Layers: []distribution.Descriptor{ + { + Digest: "sha256:62d8908bee94c202b2d35224a221aaa2058318bfa9879fa541efaecba272331b", + Size: 153263, + MediaType: MediaTypeLayer, + }, + }, + } + + deserialized, err := FromStruct(manifest) + if err != nil { + t.Fatalf("error creating DeserializedManifest: %v", err) + } + + mediaType, canonical, err := deserialized.Payload() + + if mediaType != MediaTypeManifest { + t.Fatalf("unexpected media type: %s", mediaType) + } + + // Check that the canonical field is the same as json.MarshalIndent + // with these parameters. + p, err := json.MarshalIndent(&manifest, "", " ") + if err != nil { + t.Fatalf("error marshaling manifest: %v", err) + } + if !bytes.Equal(p, canonical) { + t.Fatalf("manifest bytes not equal: %q != %q", string(canonical), string(p)) + } + + // Check that canonical field matches expected value. 
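// A minimal sketch (not from the original patch) of the round-trip property
// these assertions exercise: FromStruct fixes one canonical JSON encoding,
// Payload hands back exactly those bytes, and the content identifier is the
// digest over that canonical form. Descriptor values are illustrative:
//
//	m := Manifest{
//		Versioned: SchemaVersion,
//		Config: distribution.Descriptor{
//			MediaType: MediaTypeConfig,
//			Size:      985,
//			Digest:    "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b",
//		},
//	}
//	dm, _ := FromStruct(m)
//	_, canonical, _ := dm.Payload()
//	dgst := digest.FromBytes(canonical) // stable for any later re-serialization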
+ if !bytes.Equal(expectedManifestSerialization, canonical) { + t.Fatalf("manifest bytes not equal: %q != %q", string(canonical), string(expectedManifestSerialization)) + } + + var unmarshalled DeserializedManifest + if err := json.Unmarshal(deserialized.canonical, &unmarshalled); err != nil { + t.Fatalf("error unmarshaling manifest: %v", err) + } + + if !reflect.DeepEqual(&unmarshalled, deserialized) { + t.Fatalf("manifests are different after unmarshaling: %v != %v", unmarshalled, *deserialized) + } + + target := deserialized.Target() + if target.Digest != "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b" { + t.Fatalf("unexpected digest in target: %s", target.Digest.String()) + } + if target.MediaType != MediaTypeConfig { + t.Fatalf("unexpected media type in target: %s", target.MediaType) + } + if target.Size != 985 { + t.Fatalf("unexpected size in target: %d", target.Size) + } + + references := deserialized.References() + if len(references) != 2 { + t.Fatalf("unexpected number of references: %d", len(references)) + } + + if !reflect.DeepEqual(references[0], target) { + t.Fatalf("first reference should be target: %v != %v", references[0], target) + } + + // Test the second reference + if references[1].Digest != "sha256:62d8908bee94c202b2d35224a221aaa2058318bfa9879fa541efaecba272331b" { + t.Fatalf("unexpected digest in reference: %s", references[0].Digest.String()) + } + if references[1].MediaType != MediaTypeLayer { + t.Fatalf("unexpected media type in reference: %s", references[0].MediaType) + } + if references[1].Size != 153263 { + t.Fatalf("unexpected size in reference: %d", references[0].Size) + } +} diff --git a/registry/handlers/api_test.go b/registry/handlers/api_test.go index 98dcaa71..446f1b83 100644 --- a/registry/handlers/api_test.go +++ b/registry/handlers/api_test.go @@ -478,7 +478,7 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv { // ----------------------------------------- // Do layer push with an empty body and different digest - uploadURLBase, uploadUUID = startPushLayer(t, env, imageName) + uploadURLBase, _ = startPushLayer(t, env, imageName) resp, err = doPushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, bytes.NewReader([]byte{})) if err != nil { t.Fatalf("unexpected error doing bad layer push: %v", err) @@ -494,7 +494,7 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv { t.Fatalf("unexpected error digesting empty buffer: %v", err) } - uploadURLBase, uploadUUID = startPushLayer(t, env, imageName) + uploadURLBase, _ = startPushLayer(t, env, imageName) pushLayer(t, env.builder, imageName, zeroDigest, uploadURLBase, bytes.NewReader([]byte{})) // ----------------------------------------- @@ -507,7 +507,7 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv { t.Fatalf("unexpected error digesting empty tar: %v", err) } - uploadURLBase, uploadUUID = startPushLayer(t, env, imageName) + uploadURLBase, _ = startPushLayer(t, env, imageName) pushLayer(t, env.builder, imageName, emptyDigest, uploadURLBase, bytes.NewReader(emptyTar)) // ------------------------------------------ @@ -515,7 +515,7 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv { layerLength, _ := layerFile.Seek(0, os.SEEK_END) layerFile.Seek(0, os.SEEK_SET) - uploadURLBase, uploadUUID = startPushLayer(t, env, imageName) + uploadURLBase, _ = startPushLayer(t, env, imageName) pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile) // 
------------------------------------------ @@ -529,7 +529,7 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv { canonicalDigest := canonicalDigester.Digest() layerFile.Seek(0, 0) - uploadURLBase, uploadUUID = startPushLayer(t, env, imageName) + uploadURLBase, _ = startPushLayer(t, env, imageName) uploadURLBase, dgst := pushChunk(t, env.builder, imageName, uploadURLBase, layerFile, layerLength) finishUpload(t, env.builder, imageName, uploadURLBase, dgst) @@ -612,7 +612,7 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv { t.Fatalf("Error constructing request: %s", err) } req.Header.Set("If-None-Match", "") - resp, err = http.DefaultClient.Do(req) + resp, _ = http.DefaultClient.Do(req) checkResponse(t, "fetching layer with invalid etag", resp, http.StatusOK) // Missing tests: @@ -1874,7 +1874,7 @@ func testManifestDelete(t *testing.T, env *testEnv, args manifestArgs) { manifest := args.manifest ref, _ := reference.WithDigest(imageName, dgst) - manifestDigestURL, err := env.builder.BuildManifestURL(ref) + manifestDigestURL, _ := env.builder.BuildManifestURL(ref) // --------------- // Delete by digest resp, err := httpDelete(manifestDigestURL) @@ -1935,7 +1935,7 @@ func testManifestDelete(t *testing.T, env *testEnv, args manifestArgs) { // Upload manifest by tag tag := "atag" tagRef, _ := reference.WithTag(imageName, tag) - manifestTagURL, err := env.builder.BuildManifestURL(tagRef) + manifestTagURL, _ := env.builder.BuildManifestURL(tagRef) resp = putManifest(t, "putting manifest by tag", manifestTagURL, args.mediaType, manifest) checkResponse(t, "putting manifest by tag", resp, http.StatusCreated) checkHeaders(t, resp, http.Header{ @@ -2502,7 +2502,7 @@ func TestRegistryAsCacheMutationAPIs(t *testing.T) { checkResponse(t, "putting signed manifest to cache", resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode) // Manifest Delete - resp, err = httpDelete(manifestURL) + resp, _ = httpDelete(manifestURL) checkResponse(t, "deleting signed manifest from cache", resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode) // Blob upload initialization @@ -2521,8 +2521,8 @@ func TestRegistryAsCacheMutationAPIs(t *testing.T) { // Blob Delete ref, _ := reference.WithDigest(imageName, digestSha256EmptyTar) - blobURL, err := env.builder.BuildBlobURL(ref) - resp, err = httpDelete(blobURL) + blobURL, _ := env.builder.BuildBlobURL(ref) + resp, _ = httpDelete(blobURL) checkResponse(t, "deleting blob from cache", resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode) } @@ -2601,9 +2601,9 @@ func TestProxyManifestGetByTag(t *testing.T) { checkErr(t, err, "building manifest url") resp, err = http.Get(manifestTagURL) - checkErr(t, err, "fetching manifest from proxy by tag") + checkErr(t, err, "fetching manifest from proxy by tag (error check 1)") defer resp.Body.Close() - checkResponse(t, "fetching manifest from proxy by tag", resp, http.StatusOK) + checkResponse(t, "fetching manifest from proxy by tag (response check 1)", resp, http.StatusOK) checkHeaders(t, resp, http.Header{ "Docker-Content-Digest": []string{dgst.String()}, }) @@ -2616,9 +2616,9 @@ func TestProxyManifestGetByTag(t *testing.T) { // fetch it with the same proxy URL as before. 
Ensure the updated content is at the same tag resp, err = http.Get(manifestTagURL) - checkErr(t, err, "fetching manifest from proxy by tag") + checkErr(t, err, "fetching manifest from proxy by tag (error check 2)") defer resp.Body.Close() - checkResponse(t, "fetching manifest from proxy by tag", resp, http.StatusOK) + checkResponse(t, "fetching manifest from proxy by tag (response check 2)", resp, http.StatusOK) checkHeaders(t, resp, http.Header{ "Docker-Content-Digest": []string{newDigest.String()}, }) diff --git a/registry/handlers/manifests.go b/registry/handlers/manifests.go index 4e1d9868..af0a8876 100644 --- a/registry/handlers/manifests.go +++ b/registry/handlers/manifests.go @@ -9,6 +9,7 @@ import ( "github.com/docker/distribution" dcontext "github.com/docker/distribution/context" "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/ocischema" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/manifest/schema2" "github.com/docker/distribution/reference" @@ -72,43 +73,10 @@ func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request) imh.Errors = append(imh.Errors, err) return } - - var manifest distribution.Manifest - if imh.Tag != "" { - tags := imh.Repository.Tags(imh) - desc, err := tags.Get(imh, imh.Tag) - if err != nil { - if _, ok := err.(distribution.ErrTagUnknown); ok { - imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) - } else { - imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - } - return - } - imh.Digest = desc.Digest - } - - if etagMatch(r, imh.Digest.String()) { - w.WriteHeader(http.StatusNotModified) - return - } - - var options []distribution.ManifestServiceOption - if imh.Tag != "" { - options = append(options, distribution.WithTag(imh.Tag)) - } - manifest, err = manifests.Get(imh, imh.Digest, options...) 
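// A minimal sketch, with assumed service values, of the tag-to-manifest
// resolution this hunk relocates: the tag service maps a tag to a
// descriptor, the descriptor's digest selects the manifest, and WithTag
// threads the tag through to the manifest service:
//
//	desc, err := repo.Tags(ctx).Get(ctx, tag) // distribution.ErrTagUnknown on a missing tag
//	if err == nil {
//		manifests, _ := repo.Manifests(ctx)
//		manifest, err = manifests.Get(ctx, desc.Digest, distribution.WithTag(tag))
//	}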
- if err != nil { - if _, ok := err.(distribution.ErrManifestUnknownRevision); ok { - imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) - } else { - imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - } - return - } - supportsSchema2 := false supportsManifestList := false + supportsOCISchema := false + supportsOCIManifestList := false // this parsing of Accept headers is not quite as full-featured as godoc.org's parser, but we don't care about "q=" values // https://github.com/golang/gddo/blob/e91d4165076d7474d20abda83f92d15c7ebc3e81/httputil/header/header.go#L165-L202 for _, acceptHeader := range r.Header["Accept"] { @@ -132,16 +100,259 @@ func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request) if mediaType == manifestlist.MediaTypeManifestList { supportsManifestList = true } + if mediaType == ocischema.MediaTypeManifest { + supportsOCISchema = true + } + if mediaType == manifestlist.MediaTypeOCIManifestList { + supportsOCIManifestList = true + } } } + supportsOCI := supportsOCISchema || supportsOCIManifestList + + var manifest distribution.Manifest + if imh.Tag != "" { + tags := imh.Repository.Tags(imh) + var desc distribution.Descriptor + if !supportsOCI { + desc, err = tags.Get(imh, imh.Tag) + } else { + desc, err = tags.Get(imh, imh.annotatedTag(false)) + } + if err != nil { + if _, ok := err.(distribution.ErrTagUnknown); ok { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) + } else { + imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + } + return + } + imh.Digest = desc.Digest + } + + if etagMatch(r, imh.Digest.String()) { + w.WriteHeader(http.StatusNotModified) + return + } + + var options []distribution.ManifestServiceOption + if imh.Tag != "" { + options = append(options, distribution.WithTag(imh.annotatedTag(supportsOCI))) + } + manifest, err = manifests.Get(imh, imh.Digest, options...) + if err != nil { + if _, ok := err.(distribution.ErrManifestUnknownRevision); ok { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) + } else { + imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + } + return + } schema2Manifest, isSchema2 := manifest.(*schema2.DeserializedManifest) manifestList, isManifestList := manifest.(*manifestlist.DeserializedManifestList) + isAnOCIManifest := isSchema2 && (schema2Manifest.MediaType == ocischema.MediaTypeManifest) + isAnOCIManifestList := isManifestList && (manifestList.MediaType == manifestlist.MediaTypeOCIManifestList) + + if (isSchema2 && !isAnOCIManifest) && (supportsOCISchema && !supportsSchema2) { + fmt.Printf("\n\nmanifest is schema2, but accept header only supports OCISchema \n\n") + w.WriteHeader(http.StatusNotFound) + return + } + if (isManifestList && !isAnOCIManifestList) && (supportsOCIManifestList && !supportsManifestList) { + fmt.Printf("\n\nmanifestlist is not OCI, but accept header only supports an OCI manifestlist\n\n") + w.WriteHeader(http.StatusNotFound) + return + } + if isAnOCIManifest && (!supportsOCISchema && supportsSchema2) { + fmt.Printf("\n\nmanifest is OCI, but accept header only supports schema2\n\n") + w.WriteHeader(http.StatusNotFound) + return + } + if isAnOCIManifestList && (!supportsOCIManifestList && supportsManifestList) { + fmt.Printf("\n\nmanifestlist is OCI, but accept header only supports non-OCI manifestlists\n\n") + w.WriteHeader(http.StatusNotFound) + return + } + // Only rewrite schema2 manifests when they are being fetched by tag. 
+ // If they are being fetched by digest, we can't return something not + // matching the digest. + if imh.Tag != "" && isSchema2 && !(supportsSchema2 || supportsOCISchema) { + // Rewrite manifest in schema1 format + ctxu.GetLogger(imh).Infof("rewriting manifest %s in schema1 format to support old client", imh.Digest.String()) + + manifest, err = imh.convertSchema2Manifest(schema2Manifest) + if err != nil { + return + } + } else if imh.Tag != "" && isManifestList && !(supportsManifestList || supportsOCIManifestList) { + // Rewrite manifest in schema1 format + ctxu.GetLogger(imh).Infof("rewriting manifest list %s in schema1 format to support old client", imh.Digest.String()) + + // Find the image manifest corresponding to the default + // platform + var manifestDigest digest.Digest + for _, manifestDescriptor := range manifestList.Manifests { + if manifestDescriptor.Platform.Architecture == defaultArch && manifestDescriptor.Platform.OS == defaultOS { + manifestDigest = manifestDescriptor.Digest + break + } + } + + if manifestDigest == "" { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown) + return + } + + manifest, err = manifests.Get(imh, manifestDigest) + if err != nil { + if _, ok := err.(distribution.ErrManifestUnknownRevision); ok { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) + } else { + imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) + } + return + } + + // If necessary, convert the image manifest + if schema2Manifest, isSchema2 := manifest.(*schema2.DeserializedManifest); isSchema2 && !(supportsSchema2 || supportsOCISchema) { + manifest, err = imh.convertSchema2Manifest(schema2Manifest) + if err != nil { + return + } + } else { + imh.Digest = manifestDigest + } + } + + ct, p, err := manifest.Payload() + if err != nil { + return + } + + w.Header().Set("Content-Type", ct) + w.Header().Set("Content-Length", fmt.Sprint(len(p))) + w.Header().Set("Docker-Content-Digest", imh.Digest.String()) + w.Header().Set("Etag", fmt.Sprintf(`"%s"`, imh.Digest)) + w.Write(p) +} + +// GetImageManifest fetches the image manifest from the storage backend, if it exists. +func (imh *manifestHandler) GetImageManifest(w http.ResponseWriter, r *http.Request) { + fmt.Printf("\n\nGetting a manifest!\n\n\n") + supportsSchema2 := false + supportsManifestList := false + supportsOCISchema := false + supportsOCIManifestList := false + + // this parsing of Accept headers is not quite as full-featured as godoc.org's parser, but we don't care about "q=" values + // https://github.com/golang/gddo/blob/e91d4165076d7474d20abda83f92d15c7ebc3e81/httputil/header/header.go#L165-L202 + for _, acceptHeader := range r.Header["Accept"] { + // r.Header[...] is a slice in case the request contains the same header more than once + // if the header isn't set, we'll get the zero value, which "range" will handle gracefully + + // we need to split each header value on "," to get the full list of "Accept" values (per RFC 2616) + // https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1 + for _, mediaType := range strings.Split(acceptHeader, ",") { + // remove "; q=..." 
if present + if i := strings.Index(mediaType, ";"); i >= 0 { + mediaType = mediaType[:i] + } + + // it's common (but not required) for Accept values to be space separated ("a/b, c/d, e/f") + mediaType = strings.TrimSpace(mediaType) + + if mediaType == schema2.MediaTypeManifest { + supportsSchema2 = true + } + if mediaType == manifestlist.MediaTypeManifestList { + supportsManifestList = true + } + if mediaType == ocischema.MediaTypeManifest { + supportsOCISchema = true + } + if mediaType == manifestlist.MediaTypeOCIManifestList { + supportsOCIManifestList = true + } + } + } + supportsOCI := supportsOCISchema || supportsOCIManifestList + + ctxu.GetLogger(imh).Debug("GetImageManifest") + manifests, err := imh.Repository.Manifests(imh) + if err != nil { + imh.Errors = append(imh.Errors, err) + return + } + + var manifest distribution.Manifest + if imh.Tag != "" { + tags := imh.Repository.Tags(imh) + var desc distribution.Descriptor + if !supportsOCI { + desc, err = tags.Get(imh, imh.Tag) + if err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) + return + } + } else { + desc, err = tags.Get(imh, imh.annotatedTag(supportsOCI)) + if err != nil { + desc, err = tags.Get(imh, imh.annotatedTag(false)) + if err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) + return + } + } + } + imh.Digest = desc.Digest + } + + if etagMatch(r, imh.Digest.String()) { + w.WriteHeader(http.StatusNotModified) + return + } + + var options []distribution.ManifestServiceOption + if imh.Tag != "" { + options = append(options, distribution.WithTag(imh.annotatedTag(supportsOCI))) + } + manifest, err = manifests.Get(imh, imh.Digest, options...) + if err != nil { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) + return + } + + schema2Manifest, isSchema2 := manifest.(*schema2.DeserializedManifest) + manifestList, isManifestList := manifest.(*manifestlist.DeserializedManifestList) + isAnOCIManifest := isSchema2 && (schema2Manifest.MediaType == ocischema.MediaTypeManifest) + isAnOCIManifestList := isManifestList && (manifestList.MediaType == manifestlist.MediaTypeOCIManifestList) + + badCombinations := [][]bool{ + {isSchema2 && !isAnOCIManifest, supportsOCISchema && !supportsSchema2}, + {isManifestList && !isAnOCIManifestList, supportsOCIManifestList && !supportsManifestList}, + {isAnOCIManifest, !supportsOCISchema && supportsSchema2}, + {isAnOCIManifestList, !supportsOCIManifestList && supportsManifestList}, + } + for i, combo := range badCombinations { + if combo[0] && combo[1] { + fmt.Printf("\n\nbad combo! %d\n\n\n", i) + w.WriteHeader(http.StatusNotFound) + return + } + } + if isAnOCIManifest { + fmt.Print("\n\nreturning OCI manifest\n\n") + } else if isSchema2 { + fmt.Print("\n\nreturning schema 2 manifest\n\n") + } else { + fmt.Print("\n\nreturning schema 1 manifest\n\n") + } // Only rewrite schema2 manifests when they are being fetched by tag. // If they are being fetched by digest, we can't return something not // matching the digest. 
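// The four rejection rules above reduce to one rule, sketched here with the
// boolean names already in scope: a request 404s when the stored manifest's
// family (Docker vs OCI) is the opposite of the only family of that kind
// the client's Accept header admits.
//
//	dockerManifest := isSchema2 && !isAnOCIManifest
//	dockerList := isManifestList && !isAnOCIManifestList
//	reject := (dockerManifest && supportsOCISchema && !supportsSchema2) ||
//		(dockerList && supportsOCIManifestList && !supportsManifestList) ||
//		(isAnOCIManifest && !supportsOCISchema && supportsSchema2) ||
//		(isAnOCIManifestList && !supportsOCIManifestList && supportsManifestList)
//	// reject => w.WriteHeader(http.StatusNotFound)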
- if imh.Tag != "" && isSchema2 && !supportsSchema2 { + if imh.Tag != "" && isSchema2 && !(supportsSchema2 || supportsOCISchema) { // Rewrite manifest in schema1 format dcontext.GetLogger(imh).Infof("rewriting manifest %s in schema1 format to support old client", imh.Digest.String()) @@ -149,7 +360,7 @@ func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request) if err != nil { return } - } else if imh.Tag != "" && isManifestList && !supportsManifestList { + } else if imh.Tag != "" && isManifestList && !(supportsManifestList || supportsOCIManifestList) { // Rewrite manifest in schema1 format dcontext.GetLogger(imh).Infof("rewriting manifest list %s in schema1 format to support old client", imh.Digest.String()) @@ -199,6 +410,8 @@ func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request) w.Header().Set("Docker-Content-Digest", imh.Digest.String()) w.Header().Set("Etag", fmt.Sprintf(`"%s"`, imh.Digest)) w.Write(p) + + fmt.Printf("\n\nSucceeded in getting the manifest!\n\n\n") } func (imh *manifestHandler) convertSchema2Manifest(schema2Manifest *schema2.DeserializedManifest) (distribution.Manifest, error) { @@ -286,9 +499,17 @@ func (imh *manifestHandler) PutManifest(w http.ResponseWriter, r *http.Request) return } + isAnOCIManifest := mediaType == ocischema.MediaTypeManifest || mediaType == manifestlist.MediaTypeOCIManifestList + + if isAnOCIManifest { + fmt.Printf("\n\nPutting an OCI Manifest!\n\n\n") + } else { + fmt.Printf("\n\nPutting a Docker Manifest!\n\n\n") + } + var options []distribution.ManifestServiceOption if imh.Tag != "" { - options = append(options, distribution.WithTag(imh.Tag)) + options = append(options, distribution.WithTag(imh.annotatedTag(isAnOCIManifest))) } if err := imh.applyResourcePolicy(manifest); err != nil { @@ -301,10 +522,12 @@ func (imh *manifestHandler) PutManifest(w http.ResponseWriter, r *http.Request) // TODO(stevvooe): These error handling switches really need to be // handled by an app global mapper. if err == distribution.ErrUnsupported { + fmt.Printf("\n\nXXX 1\n\n\n") imh.Errors = append(imh.Errors, errcode.ErrorCodeUnsupported) return } if err == distribution.ErrAccessDenied { + fmt.Printf("\n\nXXX 2\n\n\n") imh.Errors = append(imh.Errors, errcode.ErrorCodeDenied) return } @@ -331,15 +554,16 @@ func (imh *manifestHandler) PutManifest(w http.ResponseWriter, r *http.Request) default: imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) } - + fmt.Printf("\n\nXXX 3\n\n\n") return } // Tag this manifest if imh.Tag != "" { tags := imh.Repository.Tags(imh) - err = tags.Tag(imh, imh.Tag, desc) + err = tags.Tag(imh, imh.annotatedTag(isAnOCIManifest), desc) if err != nil { + fmt.Printf("\n\nXXX 4: %T: %v\n\n\n", err, err) imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) return } @@ -349,6 +573,7 @@ func (imh *manifestHandler) PutManifest(w http.ResponseWriter, r *http.Request) // Construct a canonical url for the uploaded manifest. 
ref, err := reference.WithDigest(imh.Repository.Named(), imh.Digest) if err != nil { + fmt.Printf("\n\nXXX 5\n\n\n") imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) return } @@ -364,6 +589,8 @@ func (imh *manifestHandler) PutManifest(w http.ResponseWriter, r *http.Request) w.Header().Set("Location", location) w.Header().Set("Docker-Content-Digest", imh.Digest.String()) w.WriteHeader(http.StatusCreated) + + fmt.Printf("\n\nSucceeded in putting manifest!\n\n\n") } // applyResourcePolicy checks whether the resource class matches what has @@ -478,3 +705,12 @@ func (imh *manifestHandler) DeleteManifest(w http.ResponseWriter, r *http.Reques w.WriteHeader(http.StatusAccepted) } + +// annotatedTag will annotate OCI tags by prepending a string, and leave docker +// tags unmodified. +func (imh *manifestHandler) annotatedTag(oci bool) string { + if oci { + return "oci." + imh.Tag + } + return imh.Tag +} diff --git a/registry/storage/driver/storagedriver.go b/registry/storage/driver/storagedriver.go index b220713f..46c1b9e0 100644 --- a/registry/storage/driver/storagedriver.go +++ b/registry/storage/driver/storagedriver.go @@ -116,7 +116,7 @@ type FileWriter interface { // number of path components separated by slashes, where each component is // restricted to alphanumeric characters or a period, underscore, or // hyphen. -var PathRegexp = regexp.MustCompile(`^(/[A-Za-z0-9._-]+)+$`) +var PathRegexp = regexp.MustCompile(`^(/[A-Za-z0-9._:-]+)+$`) // ErrUnsupportedMethod may be returned in the case where a StorageDriver implementation does not support an optional method. type ErrUnsupportedMethod struct { diff --git a/registry/storage/manifeststore.go b/registry/storage/manifeststore.go index 20e34eb0..52bbe9da 100644 --- a/registry/storage/manifeststore.go +++ b/registry/storage/manifeststore.go @@ -9,6 +9,7 @@ import ( dcontext "github.com/docker/distribution/context" "github.com/docker/distribution/manifest" "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/ocischema" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/manifest/schema2" "github.com/opencontainers/go-digest" @@ -48,6 +49,7 @@ type manifestStore struct { schema1Handler ManifestHandler schema2Handler ManifestHandler + ocischemaHandler ManifestHandler manifestListHandler ManifestHandler } @@ -99,7 +101,9 @@ func (ms *manifestStore) Get(ctx context.Context, dgst digest.Digest, options .. 
switch versioned.MediaType { case schema2.MediaTypeManifest: return ms.schema2Handler.Unmarshal(ctx, dgst, content) - case manifestlist.MediaTypeManifestList: + case ocischema.MediaTypeManifest: + return ms.ocischemaHandler.Unmarshal(ctx, dgst, content) + case manifestlist.MediaTypeManifestList, manifestlist.MediaTypeOCIManifestList: return ms.manifestListHandler.Unmarshal(ctx, dgst, content) default: return nil, distribution.ErrManifestVerification{fmt.Errorf("unrecognized manifest content type %s", versioned.MediaType)} From 6fcea22b0aa01f513393033e268913dea7fef180 Mon Sep 17 00:00:00 2001 From: Mike Brown Date: Mon, 10 Jul 2017 18:09:10 -0500 Subject: [PATCH 101/276] add an ocischema manifest handler for the registry Signed-off-by: Mike Brown --- registry/storage/manifeststore.go | 2 + registry/storage/ocimanifesthandler.go | 128 ++++++++++++++++++ registry/storage/ocimanifesthandler_test.go | 136 ++++++++++++++++++++ registry/storage/registry.go | 6 + 4 files changed, 272 insertions(+) create mode 100644 registry/storage/ocimanifesthandler.go create mode 100644 registry/storage/ocimanifesthandler_test.go diff --git a/registry/storage/manifeststore.go b/registry/storage/manifeststore.go index 52bbe9da..13b4f5b5 100644 --- a/registry/storage/manifeststore.go +++ b/registry/storage/manifeststore.go @@ -121,6 +121,8 @@ func (ms *manifestStore) Put(ctx context.Context, manifest distribution.Manifest return ms.schema1Handler.Put(ctx, manifest, ms.skipDependencyVerification) case *schema2.DeserializedManifest: return ms.schema2Handler.Put(ctx, manifest, ms.skipDependencyVerification) + case *ocischema.DeserializedManifest: + return ms.ocischemaHandler.Put(ctx, manifest, ms.skipDependencyVerification) case *manifestlist.DeserializedManifestList: return ms.manifestListHandler.Put(ctx, manifest, ms.skipDependencyVerification) } diff --git a/registry/storage/ocimanifesthandler.go b/registry/storage/ocimanifesthandler.go new file mode 100644 index 00000000..bb658348 --- /dev/null +++ b/registry/storage/ocimanifesthandler.go @@ -0,0 +1,128 @@ +package storage + +import ( + "encoding/json" + "fmt" + "net/url" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/manifest/ocischema" + "github.com/opencontainers/go-digest" +) + +//ocischemaManifestHandler is a ManifestHandler that covers ocischema manifests. 
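// A short sketch of the dispatch rule the store's Get path above relies on:
// only the top-level mediaType field is decoded before a handler is chosen,
// so each stored payload is routed by its declared type. Input bytes are
// assumed:
//
//	var versioned manifest.Versioned
//	if err := json.Unmarshal(content, &versioned); err != nil {
//		return nil, err
//	}
//	// versioned.MediaType now selects schema2Handler, ocischemaHandler,
//	// or manifestListHandler, as in manifestStore.Get above.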
+type ocischemaManifestHandler struct {
+	repository   distribution.Repository
+	blobStore    distribution.BlobStore
+	ctx          context.Context
+	manifestURLs manifestURLs
+}
+
+var _ ManifestHandler = &ocischemaManifestHandler{}
+
+func (ms *ocischemaManifestHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) {
+	context.GetLogger(ms.ctx).Debug("(*ocischemaManifestHandler).Unmarshal")
+
+	var m ocischema.DeserializedManifest
+	if err := json.Unmarshal(content, &m); err != nil {
+		return nil, err
+	}
+
+	return &m, nil
+}
+
+func (ms *ocischemaManifestHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) {
+	context.GetLogger(ms.ctx).Debug("(*ocischemaManifestHandler).Put")
+
+	m, ok := manifest.(*ocischema.DeserializedManifest)
+	if !ok {
+		return "", fmt.Errorf("non-ocischema manifest put to ocischemaManifestHandler: %T", manifest)
+	}
+
+	if err := ms.verifyManifest(ms.ctx, *m, skipDependencyVerification); err != nil {
+		return "", err
+	}
+
+	mt, payload, err := m.Payload()
+	if err != nil {
+		return "", err
+	}
+
+	revision, err := ms.blobStore.Put(ctx, mt, payload)
+	if err != nil {
+		context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err)
+		return "", err
+	}
+
+	return revision.Digest, nil
+}
+
+// verifyManifest ensures that the manifest content is valid from the
+// perspective of the registry. As a policy, the registry only tries to store
+// valid content, leaving trust policies of that content up to consumers.
+func (ms *ocischemaManifestHandler) verifyManifest(ctx context.Context, mnfst ocischema.DeserializedManifest, skipDependencyVerification bool) error {
+	var errs distribution.ErrManifestVerification
+
+	if skipDependencyVerification {
+		return nil
+	}
+
+	manifestService, err := ms.repository.Manifests(ctx)
+	if err != nil {
+		return err
+	}
+
+	blobsService := ms.repository.Blobs(ctx)
+
+	for _, descriptor := range mnfst.References() {
+		var err error
+
+		switch descriptor.MediaType {
+		case ocischema.MediaTypeForeignLayer:
+			// Clients download this layer from an external URL, so do not check for
+			// its presence.
+			if len(descriptor.URLs) == 0 {
+				err = errMissingURL
+			}
+			allow := ms.manifestURLs.allow
+			deny := ms.manifestURLs.deny
+			for _, u := range descriptor.URLs {
+				var pu *url.URL
+				pu, err = url.Parse(u)
+				if err != nil || (pu.Scheme != "http" && pu.Scheme != "https") || pu.Fragment != "" || (allow != nil && !allow.MatchString(u)) || (deny != nil && deny.MatchString(u)) {
+					err = errInvalidURL
+					break
+				}
+			}
+		case ocischema.MediaTypeManifest:
+			var exists bool
+			exists, err = manifestService.Exists(ctx, descriptor.Digest)
+			if err != nil || !exists {
+				err = distribution.ErrBlobUnknown // just coerce to unknown.
+			}
+
+			fallthrough // double check the blob store.
+		default:
+			// forward all else to blob storage
+			if len(descriptor.URLs) == 0 {
+				_, err = blobsService.Stat(ctx, descriptor.Digest)
+			}
+		}
+
+		if err != nil {
+			if err != distribution.ErrBlobUnknown {
+				errs = append(errs, err)
+			}
+
+			// On error here, we always append unknown blob errors.
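// Condensing the URL policy enforced above for externally served layers
// (an illustrative restatement, not part of the original patch): a URL is
// accepted only when it parses, uses http or https, carries no fragment,
// matches the allow expression when one is configured, and misses the deny
// expression when one is configured:
//
//	pu, err := url.Parse(u)
//	ok := err == nil &&
//		(pu.Scheme == "http" || pu.Scheme == "https") &&
//		pu.Fragment == "" &&
//		(allow == nil || allow.MatchString(u)) &&
//		(deny == nil || !deny.MatchString(u))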
+ errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: descriptor.Digest}) + } + } + + if len(errs) != 0 { + return errs + } + + return nil +} diff --git a/registry/storage/ocimanifesthandler_test.go b/registry/storage/ocimanifesthandler_test.go new file mode 100644 index 00000000..e09f1ee3 --- /dev/null +++ b/registry/storage/ocimanifesthandler_test.go @@ -0,0 +1,136 @@ +package storage + +import ( + "regexp" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/ocischema" + "github.com/docker/distribution/registry/storage/driver/inmemory" +) + +func TestVerifyOCIManifestForeignLayer(t *testing.T) { + ctx := context.Background() + inmemoryDriver := inmemory.New() + registry := createRegistry(t, inmemoryDriver, + ManifestURLsAllowRegexp(regexp.MustCompile("^https?://foo")), + ManifestURLsDenyRegexp(regexp.MustCompile("^https?://foo/nope"))) + repo := makeRepository(t, registry, "test") + manifestService := makeManifestService(t, repo) + + config, err := repo.Blobs(ctx).Put(ctx, ocischema.MediaTypeConfig, nil) + if err != nil { + t.Fatal(err) + } + + layer, err := repo.Blobs(ctx).Put(ctx, ocischema.MediaTypeLayer, nil) + if err != nil { + t.Fatal(err) + } + + foreignLayer := distribution.Descriptor{ + Digest: "sha256:463435349086340864309863409683460843608348608934092322395278926a", + Size: 6323, + MediaType: ocischema.MediaTypeForeignLayer, + } + + template := ocischema.Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 2, + MediaType: ocischema.MediaTypeManifest, + }, + Config: config, + } + + type testcase struct { + BaseLayer distribution.Descriptor + URLs []string + Err error + } + + cases := []testcase{ + { + foreignLayer, + nil, + errMissingURL, + }, + { + // regular layers may have foreign urls + layer, + []string{"http://foo/bar"}, + nil, + }, + { + foreignLayer, + []string{"file:///local/file"}, + errInvalidURL, + }, + { + foreignLayer, + []string{"http://foo/bar#baz"}, + errInvalidURL, + }, + { + foreignLayer, + []string{""}, + errInvalidURL, + }, + { + foreignLayer, + []string{"https://foo/bar", ""}, + errInvalidURL, + }, + { + foreignLayer, + []string{"", "https://foo/bar"}, + errInvalidURL, + }, + { + foreignLayer, + []string{"http://nope/bar"}, + errInvalidURL, + }, + { + foreignLayer, + []string{"http://foo/nope"}, + errInvalidURL, + }, + { + foreignLayer, + []string{"http://foo/bar"}, + nil, + }, + { + foreignLayer, + []string{"https://foo/bar"}, + nil, + }, + } + + for _, c := range cases { + m := template + l := c.BaseLayer + l.URLs = c.URLs + m.Layers = []distribution.Descriptor{l} + dm, err := ocischema.FromStruct(m) + if err != nil { + t.Error(err) + continue + } + + _, err = manifestService.Put(ctx, dm) + if verr, ok := err.(distribution.ErrManifestVerification); ok { + // Extract the first error + if len(verr) == 2 { + if _, ok = verr[1].(distribution.ErrManifestBlobUnknown); ok { + err = verr[0] + } + } + } + if err != c.Err { + t.Errorf("%#v: expected %v, got %v", l, c.Err, err) + } + } +} diff --git a/registry/storage/registry.go b/registry/storage/registry.go index 46b96853..f7c95a59 100644 --- a/registry/storage/registry.go +++ b/registry/storage/registry.go @@ -258,6 +258,12 @@ func (repo *repository) Manifests(ctx context.Context, options ...distribution.M repository: repo, blobStore: blobStore, }, + ocischemaHandler: &ocischemaManifestHandler{ + ctx: ctx, + repository: repo, + blobStore: blobStore, + manifestURLs: 
repo.registry.manifestURLs, + }, } // Apply options From c94f28805e1b653204cc4cec2d76a12398e4524a Mon Sep 17 00:00:00 2001 From: Mike Brown Date: Tue, 11 Jul 2017 14:19:47 -0500 Subject: [PATCH 102/276] OCI media types; annotation support; oci index Signed-off-by: Mike Brown --- blobs.go | 8 ++++ manifest/manifestlist/manifestlist.go | 12 ++--- manifest/manifestlist/manifestlist_test.go | 13 +++--- manifest/ocischema/builder.go | 7 +-- manifest/ocischema/builder_test.go | 9 ++-- manifest/ocischema/manifest.go | 33 ++++--------- manifest/ocischema/manifest_test.go | 13 +++--- registry/handlers/manifests.go | 51 ++++++++++++--------- registry/storage/manifeststore.go | 5 +- registry/storage/ocimanifesthandler.go | 6 ++- registry/storage/ocimanifesthandler_test.go | 35 +++++++------- 11 files changed, 102 insertions(+), 90 deletions(-) diff --git a/blobs.go b/blobs.go index 145b0785..42a43773 100644 --- a/blobs.go +++ b/blobs.go @@ -10,6 +10,7 @@ import ( "github.com/docker/distribution/reference" "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/specs-go/v1" ) var ( @@ -72,6 +73,13 @@ type Descriptor struct { // URLs contains the source URLs of this content. URLs []string `json:"urls,omitempty"` + // Annotations contains arbitrary metadata relating to the targeted content. + Annotations map[string]string `json:"annotations,omitempty"` + + // Platform describes the platform which the image in the manifest runs on. + // This should only be used when referring to a manifest. + Platform *v1.Platform `json:"platform,omitempty"` + // NOTE: Before adding a field here, please ensure that all // other options have been exhausted. Much of the type relationships // depend on the simplicity of this type. diff --git a/manifest/manifestlist/manifestlist.go b/manifest/manifestlist/manifestlist.go index 56f6aa61..3303c6be 100644 --- a/manifest/manifestlist/manifestlist.go +++ b/manifest/manifestlist/manifestlist.go @@ -7,16 +7,13 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/manifest" - "github.com/docker/distribution/manifest/ocischema" "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/specs-go/v1" ) const ( // MediaTypeManifestList specifies the mediaType for manifest lists. MediaTypeManifestList = "application/vnd.docker.distribution.manifest.list.v2+json" - // MediaTypeOCIManifestList specifies the mediaType for OCI compliant manifest - // lists. - MediaTypeOCIManifestList = "application/vnd.oci.image.manifest.list.v1+json" ) // SchemaVersion provides a pre-initialized version structure for this @@ -30,7 +27,7 @@ var SchemaVersion = manifest.Versioned{ // packages OCIschema version of the manifest. var OCISchemaVersion = manifest.Versioned{ SchemaVersion: 2, - MediaType: MediaTypeOCIManifestList, + MediaType: v1.MediaTypeImageIndex, } func init() { @@ -92,6 +89,9 @@ type ManifestList struct { // Config references the image configuration as a blob. Manifests []ManifestDescriptor `json:"manifests"` + + // Annotations contains arbitrary metadata for the image index. + Annotations map[string]string `json:"annotations,omitempty"` } // References returns the distribution descriptors for the referenced image @@ -119,7 +119,7 @@ type DeserializedManifestList struct { // and its JSON representation. 
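// The Annotations and Platform fields added to Descriptor above give index
// entries room for arbitrary metadata and a target platform; a small sketch
// with assumed values (digest reused from the package tests purely for
// illustration):
//
//	d := distribution.Descriptor{
//		MediaType:   v1.MediaTypeImageManifest,
//		Size:        985,
//		Digest:      "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b",
//		Annotations: map[string]string{"org.opencontainers.image.ref.name": "v1.0"},
//		Platform:    &v1.Platform{Architecture: "amd64", OS: "linux"},
//	}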
func FromDescriptors(descriptors []ManifestDescriptor) (*DeserializedManifestList, error) { var m ManifestList - if len(descriptors) > 0 && descriptors[0].Descriptor.MediaType == ocischema.MediaTypeManifest { + if len(descriptors) > 0 && descriptors[0].Descriptor.MediaType == v1.MediaTypeImageManifest { m = ManifestList{ Versioned: OCISchemaVersion, } diff --git a/manifest/manifestlist/manifestlist_test.go b/manifest/manifestlist/manifestlist_test.go index 1d24aef2..5ab328dd 100644 --- a/manifest/manifestlist/manifestlist_test.go +++ b/manifest/manifestlist/manifestlist_test.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/docker/distribution" + "github.com/opencontainers/image-spec/specs-go/v1" ) var expectedManifestListSerialization = []byte(`{ @@ -110,9 +111,9 @@ func TestManifestList(t *testing.T) { } } -var expectedOCIManifestListSerialization = []byte(`{ +var expectedOCIImageIndexSerialization = []byte(`{ "schemaVersion": 2, - "mediaType": "application/vnd.oci.image.manifest.list.v1+json", + "mediaType": "application/vnd.oci.image.index.v1+json", "manifests": [ { "mediaType": "application/vnd.oci.image.manifest.v1+json", @@ -138,7 +139,7 @@ var expectedOCIManifestListSerialization = []byte(`{ ] }`) -func TestOCIManifestList(t *testing.T) { +func TestOCIImageIndex(t *testing.T) { manifestDescriptors := []ManifestDescriptor{ { Descriptor: distribution.Descriptor{ @@ -172,7 +173,7 @@ func TestOCIManifestList(t *testing.T) { mediaType, canonical, _ := deserialized.Payload() - if mediaType != MediaTypeOCIManifestList { + if mediaType != v1.MediaTypeImageIndex { t.Fatalf("unexpected media type: %s", mediaType) } @@ -187,8 +188,8 @@ func TestOCIManifestList(t *testing.T) { } // Check that the canonical field has the expected value. - if !bytes.Equal(expectedOCIManifestListSerialization, canonical) { - t.Fatalf("manifest bytes not equal: %q != %q", string(canonical), string(expectedOCIManifestListSerialization)) + if !bytes.Equal(expectedOCIImageIndexSerialization, canonical) { + t.Fatalf("manifest bytes not equal: %q != %q", string(canonical), string(expectedOCIImageIndexSerialization)) } var unmarshalled DeserializedManifestList diff --git a/manifest/ocischema/builder.go b/manifest/ocischema/builder.go index 5318c139..a6d6633a 100644 --- a/manifest/ocischema/builder.go +++ b/manifest/ocischema/builder.go @@ -4,6 +4,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/specs-go/v1" ) // builder is a type for constructing manifests. @@ -48,7 +49,7 @@ func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) { case nil: // Override MediaType, since Put always replaces the specified media // type with application/octet-stream in the descriptor it returns. - m.Config.MediaType = MediaTypeConfig + m.Config.MediaType = v1.MediaTypeImageConfig return FromStruct(m) case distribution.ErrBlobUnknown: // nop @@ -57,10 +58,10 @@ func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) { } // Add config to the blob store - m.Config, err = mb.bs.Put(ctx, MediaTypeConfig, mb.configJSON) + m.Config, err = mb.bs.Put(ctx, v1.MediaTypeImageConfig, mb.configJSON) // Override MediaType, since Put always replaces the specified media // type with application/octet-stream in the descriptor it returns. 
- m.Config.MediaType = MediaTypeConfig + m.Config.MediaType = v1.MediaTypeImageConfig if err != nil { return nil, err } diff --git a/manifest/ocischema/builder_test.go b/manifest/ocischema/builder_test.go index 7192ac59..33e9164f 100644 --- a/manifest/ocischema/builder_test.go +++ b/manifest/ocischema/builder_test.go @@ -7,6 +7,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/specs-go/v1" ) type mockBlobService struct { @@ -151,17 +152,17 @@ func TestBuilder(t *testing.T) { { Digest: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"), Size: 5312, - MediaType: MediaTypeLayer, + MediaType: v1.MediaTypeImageLayerGzip, }, { Digest: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"), Size: 235231, - MediaType: MediaTypeLayer, + MediaType: v1.MediaTypeImageLayerGzip, }, { Digest: digest.Digest("sha256:b4ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"), Size: 639152, - MediaType: MediaTypeLayer, + MediaType: v1.MediaTypeImageLayerGzip, }, } @@ -195,7 +196,7 @@ func TestBuilder(t *testing.T) { if target.Digest != configDigest { t.Fatalf("unexpected digest in target: %s", target.Digest.String()) } - if target.MediaType != MediaTypeConfig { + if target.MediaType != v1.MediaTypeImageConfig { t.Fatalf("unexpected media type in target: %s", target.MediaType) } if target.Size != 3153 { diff --git a/manifest/ocischema/manifest.go b/manifest/ocischema/manifest.go index 52f64678..f1a05f0a 100644 --- a/manifest/ocischema/manifest.go +++ b/manifest/ocischema/manifest.go @@ -8,32 +8,15 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/manifest" "github.com/opencontainers/go-digest" -) - -const ( - // MediaTypeManifest specifies the mediaType for the current version. - MediaTypeManifest = "application/vnd.oci.image.manifest.v1+json" - - // MediaTypeConfig specifies the mediaType for the image configuration. - MediaTypeConfig = "application/vnd.oci.image.config.v1+json" - - // MediaTypePluginConfig specifies the mediaType for plugin configuration. - MediaTypePluginConfig = "application/vnd.docker.plugin.v1+json" - - // MediaTypeLayer is the mediaType used for layers referenced by the manifest. - MediaTypeLayer = "application/vnd.oci.image.layer.v1.tar+gzip" - - // MediaTypeForeignLayer is the mediaType used for layers that must be - // downloaded from foreign URLs. - MediaTypeForeignLayer = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" + "github.com/opencontainers/image-spec/specs-go/v1" ) var ( // SchemaVersion provides a pre-initialized version structure for this // packages version of the manifest. 
SchemaVersion = manifest.Versioned{ - SchemaVersion: 2, // Mike: todo this could confusing cause oci version 1 is closer to docker 2 than 1 - MediaType: MediaTypeManifest, + SchemaVersion: 2, // TODO: (mikebrow/stevvooe) this could be confusing cause oci version 1 is closer to docker 2 than 1 + MediaType: v1.MediaTypeImageManifest, } ) @@ -46,15 +29,15 @@ func init() { } dgst := digest.FromBytes(b) - return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifest}, err + return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: v1.MediaTypeImageManifest}, err } - err := distribution.RegisterManifestSchema(MediaTypeManifest, ocischemaFunc) + err := distribution.RegisterManifestSchema(v1.MediaTypeImageManifest, ocischemaFunc) if err != nil { panic(fmt.Sprintf("Unable to register manifest: %s", err)) } } -// Manifest defines a schema2 manifest. +// Manifest defines a ocischema manifest. type Manifest struct { manifest.Versioned @@ -64,6 +47,9 @@ type Manifest struct { // Layers lists descriptors for the layers referenced by the // configuration. Layers []distribution.Descriptor `json:"layers"` + + // Annotations contains arbitrary metadata for the image manifest. + Annotations map[string]string `json:"annotations,omitempty"` } // References returnes the descriptors of this manifests references. @@ -71,6 +57,7 @@ func (m Manifest) References() []distribution.Descriptor { references := make([]distribution.Descriptor, 0, 1+len(m.Layers)) references = append(references, m.Config) references = append(references, m.Layers...) + // TODO: (mikebrow/stevvooe) should we return annotations as references return references } diff --git a/manifest/ocischema/manifest_test.go b/manifest/ocischema/manifest_test.go index 9f439dcd..55dc0306 100644 --- a/manifest/ocischema/manifest_test.go +++ b/manifest/ocischema/manifest_test.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/docker/distribution" + "github.com/opencontainers/image-spec/specs-go/v1" ) var expectedManifestSerialization = []byte(`{ @@ -32,13 +33,13 @@ func TestManifest(t *testing.T) { Config: distribution.Descriptor{ Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", Size: 985, - MediaType: MediaTypeConfig, + MediaType: v1.MediaTypeImageConfig, }, Layers: []distribution.Descriptor{ { Digest: "sha256:62d8908bee94c202b2d35224a221aaa2058318bfa9879fa541efaecba272331b", Size: 153263, - MediaType: MediaTypeLayer, + MediaType: v1.MediaTypeImageLayerGzip, }, }, } @@ -48,9 +49,9 @@ func TestManifest(t *testing.T) { t.Fatalf("error creating DeserializedManifest: %v", err) } - mediaType, canonical, err := deserialized.Payload() + mediaType, canonical, _ := deserialized.Payload() - if mediaType != MediaTypeManifest { + if mediaType != v1.MediaTypeImageManifest { t.Fatalf("unexpected media type: %s", mediaType) } @@ -82,7 +83,7 @@ func TestManifest(t *testing.T) { if target.Digest != "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b" { t.Fatalf("unexpected digest in target: %s", target.Digest.String()) } - if target.MediaType != MediaTypeConfig { + if target.MediaType != v1.MediaTypeImageConfig { t.Fatalf("unexpected media type in target: %s", target.MediaType) } if target.Size != 985 { @@ -102,7 +103,7 @@ func TestManifest(t *testing.T) { if references[1].Digest != "sha256:62d8908bee94c202b2d35224a221aaa2058318bfa9879fa541efaecba272331b" { t.Fatalf("unexpected digest in reference: %s", references[0].Digest.String()) } - if references[1].MediaType != 
MediaTypeLayer { + if references[1].MediaType != v1.MediaTypeImageLayerGzip { t.Fatalf("unexpected media type in reference: %s", references[0].MediaType) } if references[1].Size != 153263 { diff --git a/registry/handlers/manifests.go b/registry/handlers/manifests.go index af0a8876..07fd6f8c 100644 --- a/registry/handlers/manifests.go +++ b/registry/handlers/manifests.go @@ -18,6 +18,7 @@ import ( "github.com/docker/distribution/registry/auth" "github.com/gorilla/handlers" "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/specs-go/v1" ) // These constants determine which architecture and OS to choose from a @@ -76,7 +77,7 @@ func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request) supportsSchema2 := false supportsManifestList := false supportsOCISchema := false - supportsOCIManifestList := false + supportsOCIImageIndex := false // this parsing of Accept headers is not quite as full-featured as godoc.org's parser, but we don't care about "q=" values // https://github.com/golang/gddo/blob/e91d4165076d7474d20abda83f92d15c7ebc3e81/httputil/header/header.go#L165-L202 for _, acceptHeader := range r.Header["Accept"] { @@ -100,15 +101,15 @@ func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request) if mediaType == manifestlist.MediaTypeManifestList { supportsManifestList = true } - if mediaType == ocischema.MediaTypeManifest { + if mediaType == v1.MediaTypeImageManifest { supportsOCISchema = true } - if mediaType == manifestlist.MediaTypeOCIManifestList { - supportsOCIManifestList = true + if mediaType == v1.MediaTypeImageIndex { + supportsOCIImageIndex = true } } } - supportsOCI := supportsOCISchema || supportsOCIManifestList + supportsOCI := supportsOCISchema || supportsOCIImageIndex var manifest distribution.Manifest if imh.Tag != "" { @@ -151,15 +152,15 @@ func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request) schema2Manifest, isSchema2 := manifest.(*schema2.DeserializedManifest) manifestList, isManifestList := manifest.(*manifestlist.DeserializedManifestList) - isAnOCIManifest := isSchema2 && (schema2Manifest.MediaType == ocischema.MediaTypeManifest) - isAnOCIManifestList := isManifestList && (manifestList.MediaType == manifestlist.MediaTypeOCIManifestList) + isAnOCIManifest := isSchema2 && (schema2Manifest.MediaType == v1.MediaTypeImageManifest) + isAnOCIImageIndex := isManifestList && (manifestList.MediaType == v1.MediaTypeImageIndex) if (isSchema2 && !isAnOCIManifest) && (supportsOCISchema && !supportsSchema2) { fmt.Printf("\n\nmanifest is schema2, but accept header only supports OCISchema \n\n") w.WriteHeader(http.StatusNotFound) return } - if (isManifestList && !isAnOCIManifestList) && (supportsOCIManifestList && !supportsManifestList) { + if (isManifestList && !isAnOCIImageIndex) && (supportsOCIImageIndex && !supportsManifestList) { fmt.Printf("\n\nmanifestlist is not OCI, but accept header only supports an OCI manifestlist\n\n") w.WriteHeader(http.StatusNotFound) return @@ -169,7 +170,7 @@ func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request) w.WriteHeader(http.StatusNotFound) return } - if isAnOCIManifestList && (!supportsOCIManifestList && supportsManifestList) { + if isAnOCIImageIndex && (!supportsOCIImageIndex && supportsManifestList) { fmt.Printf("\n\nmanifestlist is OCI, but accept header only supports non-OCI manifestlists\n\n") w.WriteHeader(http.StatusNotFound) return @@ -185,7 +186,7 @@ func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r 
*http.Request) if err != nil { return } - } else if imh.Tag != "" && isManifestList && !(supportsManifestList || supportsOCIManifestList) { + } else if imh.Tag != "" && isManifestList && !(supportsManifestList || supportsOCIImageIndex) { // Rewrite manifest in schema1 format ctxu.GetLogger(imh).Infof("rewriting manifest list %s in schema1 format to support old client", imh.Digest.String()) @@ -243,7 +244,7 @@ func (imh *manifestHandler) GetImageManifest(w http.ResponseWriter, r *http.Requ supportsSchema2 := false supportsManifestList := false supportsOCISchema := false - supportsOCIManifestList := false + supportsOCIImageIndex := false // this parsing of Accept headers is not quite as full-featured as godoc.org's parser, but we don't care about "q=" values // https://github.com/golang/gddo/blob/e91d4165076d7474d20abda83f92d15c7ebc3e81/httputil/header/header.go#L165-L202 @@ -268,15 +269,15 @@ func (imh *manifestHandler) GetImageManifest(w http.ResponseWriter, r *http.Requ if mediaType == manifestlist.MediaTypeManifestList { supportsManifestList = true } - if mediaType == ocischema.MediaTypeManifest { + if mediaType == v1.MediaTypeImageManifest { supportsOCISchema = true } - if mediaType == manifestlist.MediaTypeOCIManifestList { - supportsOCIManifestList = true + if mediaType == v1.MediaTypeImageIndex { + supportsOCIImageIndex = true } } } - supportsOCI := supportsOCISchema || supportsOCIManifestList + supportsOCI := supportsOCISchema || supportsOCIImageIndex ctxu.GetLogger(imh).Debug("GetImageManifest") manifests, err := imh.Repository.Manifests(imh) @@ -325,14 +326,14 @@ func (imh *manifestHandler) GetImageManifest(w http.ResponseWriter, r *http.Requ schema2Manifest, isSchema2 := manifest.(*schema2.DeserializedManifest) manifestList, isManifestList := manifest.(*manifestlist.DeserializedManifestList) - isAnOCIManifest := isSchema2 && (schema2Manifest.MediaType == ocischema.MediaTypeManifest) - isAnOCIManifestList := isManifestList && (manifestList.MediaType == manifestlist.MediaTypeOCIManifestList) + isAnOCIManifest := isSchema2 && (schema2Manifest.MediaType == v1.MediaTypeImageManifest) + isAnOCIImageIndex := isManifestList && (manifestList.MediaType == v1.MediaTypeImageIndex) badCombinations := [][]bool{ {isSchema2 && !isAnOCIManifest, supportsOCISchema && !supportsSchema2}, - {isManifestList && !isAnOCIManifestList, supportsOCIManifestList && !supportsManifestList}, + {isManifestList && !isAnOCIImageIndex, supportsOCIImageIndex && !supportsManifestList}, {isAnOCIManifest, !supportsOCISchema && supportsSchema2}, - {isAnOCIManifestList, !supportsOCIManifestList && supportsManifestList}, + {isAnOCIImageIndex, !supportsOCIImageIndex && supportsManifestList}, } for i, combo := range badCombinations { if combo[0] && combo[1] { @@ -360,7 +361,7 @@ func (imh *manifestHandler) GetImageManifest(w http.ResponseWriter, r *http.Requ if err != nil { return } - } else if imh.Tag != "" && isManifestList && !(supportsManifestList || supportsOCIManifestList) { + } else if imh.Tag != "" && isManifestList && !(supportsManifestList || supportsOCIImageIndex) { // Rewrite manifest in schema1 format dcontext.GetLogger(imh).Infof("rewriting manifest list %s in schema1 format to support old client", imh.Digest.String()) @@ -499,7 +500,7 @@ func (imh *manifestHandler) PutManifest(w http.ResponseWriter, r *http.Request) return } - isAnOCIManifest := mediaType == ocischema.MediaTypeManifest || mediaType == manifestlist.MediaTypeOCIManifestList + isAnOCIManifest := mediaType == v1.MediaTypeImageManifest || 
mediaType == v1.MediaTypeImageIndex
 	if isAnOCIManifest {
 		fmt.Printf("\n\nPutting an OCI Manifest!\n\n\n")
@@ -615,6 +616,14 @@ func (imh *manifestHandler) applyResourcePolicy(manifest distribution.Manifest)
 			message := fmt.Sprintf("unknown manifest class for %s", m.Config.MediaType)
 			return errcode.ErrorCodeDenied.WithMessage(message)
 		}
+	case *ocischema.DeserializedManifest:
+		switch m.Config.MediaType {
+		case v1.MediaTypeImageConfig:
+			class = "image"
+		default:
+			message := fmt.Sprintf("unknown manifest class for %s", m.Config.MediaType)
+			return errcode.ErrorCodeDenied.WithMessage(message)
+		}
 	}
 
 	if class == "" {
diff --git a/registry/storage/manifeststore.go b/registry/storage/manifeststore.go
index 13b4f5b5..6543f6fd 100644
--- a/registry/storage/manifeststore.go
+++ b/registry/storage/manifeststore.go
@@ -13,6 +13,7 @@ import (
 	"github.com/docker/distribution/manifest/schema1"
 	"github.com/docker/distribution/manifest/schema2"
 	"github.com/opencontainers/go-digest"
+	"github.com/opencontainers/image-spec/specs-go/v1"
 )
 
 // A ManifestHandler gets and puts manifests of a particular type.
@@ -101,9 +102,9 @@ func (ms *manifestStore) Get(ctx context.Context, dgst digest.Digest, options ..
 	switch versioned.MediaType {
 	case schema2.MediaTypeManifest:
 		return ms.schema2Handler.Unmarshal(ctx, dgst, content)
-	case ocischema.MediaTypeManifest:
+	case v1.MediaTypeImageManifest:
 		return ms.ocischemaHandler.Unmarshal(ctx, dgst, content)
-	case manifestlist.MediaTypeManifestList, manifestlist.MediaTypeOCIManifestList:
+	case manifestlist.MediaTypeManifestList, v1.MediaTypeImageIndex:
 		return ms.manifestListHandler.Unmarshal(ctx, dgst, content)
 	default:
 		return nil, distribution.ErrManifestVerification{fmt.Errorf("unrecognized manifest content type %s", versioned.MediaType)}
diff --git a/registry/storage/ocimanifesthandler.go b/registry/storage/ocimanifesthandler.go
index bb658348..148b7a2e 100644
--- a/registry/storage/ocimanifesthandler.go
+++ b/registry/storage/ocimanifesthandler.go
@@ -9,6 +9,7 @@ import (
 	"github.com/docker/distribution/context"
 	"github.com/docker/distribution/manifest/ocischema"
 	"github.com/opencontainers/go-digest"
+	"github.com/opencontainers/image-spec/specs-go/v1"
 )
 
 //ocischemaManifestHandler is a ManifestHandler that covers ocischema manifests.
@@ -79,7 +80,8 @@ func (ms *ocischemaManifestHandler) verifyManifest(ctx context.Context, mnfst oc
 		var err error
 
 		switch descriptor.MediaType {
-		case ocischema.MediaTypeForeignLayer:
+		// TODO: mikebrow/steveoe verify we should treat oci nondistributable like foreign layers?
+		case v1.MediaTypeImageLayerNonDistributable, v1.MediaTypeImageLayerNonDistributableGzip:
 			// Clients download this layer from an external URL, so do not check for
 			// its presence.
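// [Editor's aside, not part of the upstream patch: with this change a
// nondistributable layer is validated like a foreign layer. A descriptor
// such as the hypothetical
//
//	distribution.Descriptor{
//		MediaType: v1.MediaTypeImageLayerNonDistributableGzip,
//		URLs:      []string{"https://example.com/layer.tar.gz"},
//	}
//
// must carry at least one well-formed http or https URL that also passes
// the configured allow/deny rules; an empty URL list, a file:// URL, or a
// URL containing a fragment is rejected, per the test table below.
// The example URL is illustrative only.]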
if len(descriptor.URLs) == 0 { @@ -95,7 +97,7 @@ func (ms *ocischemaManifestHandler) verifyManifest(ctx context.Context, mnfst oc break } } - case ocischema.MediaTypeManifest: + case v1.MediaTypeImageManifest: var exists bool exists, err = manifestService.Exists(ctx, descriptor.Digest) if err != nil || !exists { diff --git a/registry/storage/ocimanifesthandler_test.go b/registry/storage/ocimanifesthandler_test.go index e09f1ee3..d04ce531 100644 --- a/registry/storage/ocimanifesthandler_test.go +++ b/registry/storage/ocimanifesthandler_test.go @@ -9,9 +9,10 @@ import ( "github.com/docker/distribution/manifest" "github.com/docker/distribution/manifest/ocischema" "github.com/docker/distribution/registry/storage/driver/inmemory" + "github.com/opencontainers/image-spec/specs-go/v1" ) -func TestVerifyOCIManifestForeignLayer(t *testing.T) { +func TestVerifyOCIManifestNonDistributableLayer(t *testing.T) { ctx := context.Background() inmemoryDriver := inmemory.New() registry := createRegistry(t, inmemoryDriver, @@ -20,26 +21,26 @@ func TestVerifyOCIManifestForeignLayer(t *testing.T) { repo := makeRepository(t, registry, "test") manifestService := makeManifestService(t, repo) - config, err := repo.Blobs(ctx).Put(ctx, ocischema.MediaTypeConfig, nil) + config, err := repo.Blobs(ctx).Put(ctx, v1.MediaTypeImageConfig, nil) if err != nil { t.Fatal(err) } - layer, err := repo.Blobs(ctx).Put(ctx, ocischema.MediaTypeLayer, nil) + layer, err := repo.Blobs(ctx).Put(ctx, v1.MediaTypeImageLayerGzip, nil) if err != nil { t.Fatal(err) } - foreignLayer := distribution.Descriptor{ + nonDistributableLayer := distribution.Descriptor{ Digest: "sha256:463435349086340864309863409683460843608348608934092322395278926a", Size: 6323, - MediaType: ocischema.MediaTypeForeignLayer, + MediaType: v1.MediaTypeImageLayerNonDistributableGzip, } template := ocischema.Manifest{ Versioned: manifest.Versioned{ SchemaVersion: 2, - MediaType: ocischema.MediaTypeManifest, + MediaType: v1.MediaTypeImageManifest, }, Config: config, } @@ -52,58 +53,58 @@ func TestVerifyOCIManifestForeignLayer(t *testing.T) { cases := []testcase{ { - foreignLayer, + nonDistributableLayer, nil, errMissingURL, }, { - // regular layers may have foreign urls + // regular layers may have foreign urls (non-Distributable Layers) layer, []string{"http://foo/bar"}, nil, }, { - foreignLayer, + nonDistributableLayer, []string{"file:///local/file"}, errInvalidURL, }, { - foreignLayer, + nonDistributableLayer, []string{"http://foo/bar#baz"}, errInvalidURL, }, { - foreignLayer, + nonDistributableLayer, []string{""}, errInvalidURL, }, { - foreignLayer, + nonDistributableLayer, []string{"https://foo/bar", ""}, errInvalidURL, }, { - foreignLayer, + nonDistributableLayer, []string{"", "https://foo/bar"}, errInvalidURL, }, { - foreignLayer, + nonDistributableLayer, []string{"http://nope/bar"}, errInvalidURL, }, { - foreignLayer, + nonDistributableLayer, []string{"http://foo/nope"}, errInvalidURL, }, { - foreignLayer, + nonDistributableLayer, []string{"http://foo/bar"}, nil, }, { - foreignLayer, + nonDistributableLayer, []string{"https://foo/bar"}, nil, }, From fcaffa38bcbb6055a626b22bb5c6c6488c92833e Mon Sep 17 00:00:00 2001 From: Mike Brown Date: Tue, 11 Jul 2017 14:20:34 -0500 Subject: [PATCH 103/276] vendor oci image-spec Signed-off-by: Mike Brown --- vendor.conf | 1 + .../opencontainers/image-spec/LICENSE | 191 ++++++++++++++++++ .../opencontainers/image-spec/README.md | 167 +++++++++++++++ .../image-spec/specs-go/v1/annotations.go | 56 +++++ 
.../image-spec/specs-go/v1/config.go | 103 ++++++++++ .../image-spec/specs-go/v1/descriptor.go | 64 ++++++ .../image-spec/specs-go/v1/index.go | 29 +++ .../image-spec/specs-go/v1/layout.go | 28 +++ .../image-spec/specs-go/v1/manifest.go | 32 +++ .../image-spec/specs-go/v1/mediatype.go | 48 +++++ .../image-spec/specs-go/version.go | 32 +++ .../image-spec/specs-go/versioned.go | 23 +++ 12 files changed, 774 insertions(+) create mode 100644 vendor/github.com/opencontainers/image-spec/LICENSE create mode 100644 vendor/github.com/opencontainers/image-spec/README.md create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/version.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/versioned.go diff --git a/vendor.conf b/vendor.conf index b67e03ad..4f52b597 100644 --- a/vendor.conf +++ b/vendor.conf @@ -49,3 +49,4 @@ gopkg.in/square/go-jose.v1 40d457b439244b546f023d056628e5184136899b gopkg.in/yaml.v2 bef53efd0c76e49e6de55ead051f886bea7e9420 rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb +github.com/opencontainers/image-spec e0536ae0cb47b8fed6afdaa9ba2589f6bd915caa diff --git a/vendor/github.com/opencontainers/image-spec/LICENSE b/vendor/github.com/opencontainers/image-spec/LICENSE new file mode 100644 index 00000000..9fdc20fd --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2016 The Linux Foundation. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/opencontainers/image-spec/README.md b/vendor/github.com/opencontainers/image-spec/README.md new file mode 100644 index 00000000..5ab5554e --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/README.md @@ -0,0 +1,167 @@ +# OCI Image Format Specification +
+
+The OCI Image Format project creates and maintains the software shipping container image format spec (OCI Image Format).
+
+**[The specification can be found here](spec.md).**
+
+This repository also provides [Go types](specs-go), [intra-blob validation tooling, and JSON Schema](schema).
+The Go types and validation should be compatible with the current Go release; earlier Go releases are not supported.
+
+Additional documentation about how this group operates:
+
+- [Code of Conduct](https://github.com/opencontainers/tob/blob/d2f9d68c1332870e40693fe077d311e0742bc73d/code-of-conduct.md)
+- [Roadmap](#roadmap)
+- [Releases](RELEASES.md)
+- [Project Documentation](project.md)
+
+The _optional_ and _base_ layers of all OCI projects are tracked in the [OCI Scope Table](https://www.opencontainers.org/about/oci-scope-table).
+
+## Running an OCI Image
+
+The OCI Image Format partner project is the [OCI Runtime Spec project](https://github.com/opencontainers/runtime-spec).
+The Runtime Specification outlines how to run a "[filesystem bundle](https://github.com/opencontainers/runtime-spec/blob/master/bundle.md)" that is unpacked on disk.
+At a high level, an OCI implementation would download an OCI Image and then unpack that image into an OCI Runtime filesystem bundle.
+At this point the OCI Runtime Bundle would be run by an OCI Runtime.
+
+This entire workflow supports the UX that users have come to expect from container engines like Docker and rkt: primarily, the ability to run an image with no additional arguments:
+
+* docker run example.com/org/app:v1.0.0
+* rkt run example.com/org/app,version=v1.0.0
+
+To support this UX the OCI Image Format contains sufficient information to launch the application on the target platform (e.g. command, arguments, environment variables, etc).
+
+## FAQ
+
+**Q: Why doesn't this project mention distribution?**
+
+A: Distribution, for example using HTTP as both Docker v2.2 and AppC do today, is currently out of scope on the [OCI Scope Table](https://www.opencontainers.org/about/oci-scope-table).
+There has been [some discussion on the TOB mailing list](https://groups.google.com/a/opencontainers.org/d/msg/tob/A3JnmI-D-6Y/tLuptPDHAgAJ) to make distribution an optional layer, but this topic is a work in progress.
+
+**Q: What happens to AppC or Docker Image Formats?**
+
+A: Existing formats can continue to be a proving ground for technologies, as needed.
+The OCI Image Format project strives to provide a dependable open specification that can be shared between different tools and be evolved for years or decades of compatibility, as the deb and rpm formats have.
+
+Find more [FAQ on the OCI site](https://www.opencontainers.org/faq).
+
+## Roadmap
+
+The [GitHub milestones](https://github.com/opencontainers/image-spec/milestones) lay out the path to the OCI v1.0.0 release in late 2016.
+
+# Contributing
+
+Development happens on GitHub for the spec.
+Issues are used for bugs and actionable items and longer discussions can happen on the [mailing list](#mailing-list).
+
+The specification and code are licensed under the Apache 2.0 license found in the `LICENSE` file of this repository.
+
+## Discuss your design
+
+The project welcomes submissions, but please let everyone know what you are working on.
+
+Before undertaking a nontrivial change to this specification, send mail to the [mailing list](#mailing-list) to discuss what you plan to do.
+This gives everyone a chance to validate the design, helps prevent duplication of effort, and ensures that the idea fits.
+It also guarantees that the design is sound before code is written; a GitHub pull-request is not the place for high-level discussions. + +Typos and grammatical errors can go straight to a pull-request. +When in doubt, start on the [mailing-list](#mailing-list). + +## Weekly Call + +The contributors and maintainers of all OCI projects have a weekly meeting Wednesdays at 2:00 PM (USA Pacific). +Everyone is welcome to participate via [UberConference web][UberConference] or audio-only: +1-415-968-0849 (no PIN needed). +An initial agenda will be posted to the [mailing list](#mailing-list) earlier in the week, and everyone is welcome to propose additional topics or suggest other agenda alterations there. +Minutes are posted to the [mailing list](#mailing-list) and minutes from past calls are archived [here][minutes]. + +## Mailing List + +You can subscribe and join the mailing list on [Google Groups](https://groups.google.com/a/opencontainers.org/forum/#!forum/dev). + +## IRC + +OCI discussion happens on #opencontainers on Freenode ([logs][irc-logs]). + +## Markdown style + +To keep consistency throughout the Markdown files in the Open Container spec all files should be formatted one sentence per line. +This fixes two things: it makes diffing easier with git and it resolves fights about line wrapping length. +For example, this paragraph will span three lines in the Markdown source. + +## Git commit + +### Sign your work + +The sign-off is a simple line at the end of the explanation for the patch, which certifies that you wrote it or otherwise have the right to pass it on as an open-source patch. +The rules are pretty simple: if you can certify the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +using your real name (sorry, no pseudonyms or anonymous contributions.) + +You can add the sign off when creating the git commit via `git commit -s`. + +### Commit Style + +Simple house-keeping for clean git history. 
+Read more on [How to Write a Git Commit Message](http://chris.beams.io/posts/git-commit/) or the Discussion section of [`git-commit(1)`](http://git-scm.com/docs/git-commit). + +1. Separate the subject from body with a blank line +2. Limit the subject line to 50 characters +3. Capitalize the subject line +4. Do not end the subject line with a period +5. Use the imperative mood in the subject line +6. Wrap the body at 72 characters +7. Use the body to explain what and why vs. how + * If there was important/useful/essential conversation or information, copy or include a reference +8. When possible, one keyword to scope the change in the subject (i.e. "README: ...", "runtime: ...") + + +[UberConference]: https://www.uberconference.com/opencontainers +[irc-logs]: http://ircbot.wl.linuxfoundation.org/eavesdrop/%23opencontainers/ +[minutes]: http://ircbot.wl.linuxfoundation.org/meetings/opencontainers/ diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go new file mode 100644 index 00000000..35d81089 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go @@ -0,0 +1,56 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +const ( + // AnnotationCreated is the annotation key for the date and time on which the image was built (date-time string as defined by RFC 3339). + AnnotationCreated = "org.opencontainers.image.created" + + // AnnotationAuthors is the annotation key for the contact details of the people or organization responsible for the image (freeform string). + AnnotationAuthors = "org.opencontainers.image.authors" + + // AnnotationURL is the annotation key for the URL to find more information on the image. + AnnotationURL = "org.opencontainers.image.url" + + // AnnotationDocumentation is the annotation key for the URL to get documentation on the image. + AnnotationDocumentation = "org.opencontainers.image.documentation" + + // AnnotationSource is the annotation key for the URL to get source code for building the image. + AnnotationSource = "org.opencontainers.image.source" + + // AnnotationVersion is the annotation key for the version of the packaged software. + // The version MAY match a label or tag in the source code repository. + // The version MAY be Semantic versioning-compatible. + AnnotationVersion = "org.opencontainers.image.version" + + // AnnotationRevision is the annotation key for the source control revision identifier for the packaged software. + AnnotationRevision = "org.opencontainers.image.revision" + + // AnnotationVendor is the annotation key for the name of the distributing entity, organization or individual. + AnnotationVendor = "org.opencontainers.image.vendor" + + // AnnotationLicenses is the annotation key for the license(s) under which contained software is distributed as an SPDX License Expression. 
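// [Editor's note, not part of the vendored file: an SPDX License
// Expression here is a string such as "Apache-2.0" or
// "MIT OR GPL-2.0-only"; these example values are illustrative.]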
+	AnnotationLicenses = "org.opencontainers.image.licenses"
+
+	// AnnotationRefName is the annotation key for the name of the reference for a target.
+	// SHOULD only be considered valid when on descriptors on `index.json` within image layout.
+	AnnotationRefName = "org.opencontainers.image.ref.name"
+
+	// AnnotationTitle is the annotation key for the human-readable title of the image.
+	AnnotationTitle = "org.opencontainers.image.title"
+
+	// AnnotationDescription is the annotation key for the human-readable description of the software packaged in the image.
+	AnnotationDescription = "org.opencontainers.image.description"
+)
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go
new file mode 100644
index 00000000..fe799bd6
--- /dev/null
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go
@@ -0,0 +1,103 @@
+// Copyright 2016 The Linux Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+	"time"
+
+	digest "github.com/opencontainers/go-digest"
+)
+
+// ImageConfig defines the execution parameters which should be used as a base when running a container using an image.
+type ImageConfig struct {
+	// User defines the username or UID which the process in the container should run as.
+	User string `json:"User,omitempty"`
+
+	// ExposedPorts is a set of ports to expose from a container running this image.
+	ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"`
+
+	// Env is a list of environment variables to be used in a container.
+	Env []string `json:"Env,omitempty"`
+
+	// Entrypoint defines a list of arguments to use as the command to execute when the container starts.
+	Entrypoint []string `json:"Entrypoint,omitempty"`
+
+	// Cmd defines the default arguments to the entrypoint of the container.
+	Cmd []string `json:"Cmd,omitempty"`
+
+	// Volumes is a set of directories describing where the process is likely to write data specific to a container instance.
+	Volumes map[string]struct{} `json:"Volumes,omitempty"`
+
+	// WorkingDir sets the current working directory of the entrypoint process in the container.
+	WorkingDir string `json:"WorkingDir,omitempty"`
+
+	// Labels contains arbitrary metadata for the container.
+	Labels map[string]string `json:"Labels,omitempty"`
+
+	// StopSignal contains the system call signal that will be sent to the container to exit.
+	StopSignal string `json:"StopSignal,omitempty"`
+}
+
+// RootFS describes the layer content addresses
+type RootFS struct {
+	// Type is the type of the rootfs.
+	Type string `json:"type"`
+
+	// DiffIDs is an array of layer content hashes (DiffIDs), in order from bottom-most to top-most.
+	DiffIDs []digest.Digest `json:"diff_ids"`
+}
+
+// History describes the history of a layer.
+type History struct {
+	// Created is the combined date and time at which the layer was created, formatted as defined by RFC 3339, section 5.6.
+ Created *time.Time `json:"created,omitempty"` + + // CreatedBy is the command which created the layer. + CreatedBy string `json:"created_by,omitempty"` + + // Author is the author of the build point. + Author string `json:"author,omitempty"` + + // Comment is a custom message set when creating the layer. + Comment string `json:"comment,omitempty"` + + // EmptyLayer is used to mark if the history item created a filesystem diff. + EmptyLayer bool `json:"empty_layer,omitempty"` +} + +// Image is the JSON structure which describes some basic information about the image. +// This provides the `application/vnd.oci.image.config.v1+json` mediatype when marshalled to JSON. +type Image struct { + // Created is the combined date and time at which the image was created, formatted as defined by RFC 3339, section 5.6. + Created *time.Time `json:"created,omitempty"` + + // Author defines the name and/or email address of the person or entity which created and is responsible for maintaining the image. + Author string `json:"author,omitempty"` + + // Architecture is the CPU architecture which the binaries in this image are built to run on. + Architecture string `json:"architecture"` + + // OS is the name of the operating system which the image is built to run on. + OS string `json:"os"` + + // Config defines the execution parameters which should be used as a base when running a container using the image. + Config ImageConfig `json:"config,omitempty"` + + // RootFS references the layer content addresses used by the image. + RootFS RootFS `json:"rootfs"` + + // History describes the history of each layer. + History []History `json:"history,omitempty"` +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go new file mode 100644 index 00000000..6e442a08 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go @@ -0,0 +1,64 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import digest "github.com/opencontainers/go-digest" + +// Descriptor describes the disposition of targeted content. +// This structure provides `application/vnd.oci.descriptor.v1+json` mediatype +// when marshalled to JSON. +type Descriptor struct { + // MediaType is the media type of the object this schema refers to. + MediaType string `json:"mediaType,omitempty"` + + // Digest is the digest of the targeted content. + Digest digest.Digest `json:"digest"` + + // Size specifies the size in bytes of the blob. + Size int64 `json:"size"` + + // URLs specifies a list of URLs from which this object MAY be downloaded + URLs []string `json:"urls,omitempty"` + + // Annotations contains arbitrary metadata relating to the targeted content. + Annotations map[string]string `json:"annotations,omitempty"` + + // Platform describes the platform which the image in the manifest runs on. + // + // This should only be used when referring to a manifest. 
+ Platform *Platform `json:"platform,omitempty"` +} + +// Platform describes the platform which the image in the manifest runs on. +type Platform struct { + // Architecture field specifies the CPU architecture, for example + // `amd64` or `ppc64`. + Architecture string `json:"architecture"` + + // OS specifies the operating system, for example `linux` or `windows`. + OS string `json:"os"` + + // OSVersion is an optional field specifying the operating system + // version, for example on Windows `10.0.14393.1066`. + OSVersion string `json:"os.version,omitempty"` + + // OSFeatures is an optional field specifying an array of strings, + // each listing a required OS feature (for example on Windows `win32k`). + OSFeatures []string `json:"os.features,omitempty"` + + // Variant is an optional field specifying a variant of the CPU, for + // example `v7` to specify ARMv7 when architecture is `arm`. + Variant string `json:"variant,omitempty"` +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go new file mode 100644 index 00000000..4e6c4b23 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go @@ -0,0 +1,29 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import "github.com/opencontainers/image-spec/specs-go" + +// Index references manifests for various platforms. +// This structure provides `application/vnd.oci.image.index.v1+json` mediatype when marshalled to JSON. +type Index struct { + specs.Versioned + + // Manifests references platform specific manifests. + Manifests []Descriptor `json:"manifests"` + + // Annotations contains arbitrary metadata for the image index. + Annotations map[string]string `json:"annotations,omitempty"` +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go new file mode 100644 index 00000000..fc79e9e0 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go @@ -0,0 +1,28 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v1 + +const ( + // ImageLayoutFile is the file name of oci image layout file + ImageLayoutFile = "oci-layout" + // ImageLayoutVersion is the version of ImageLayout + ImageLayoutVersion = "1.0.0" +) + +// ImageLayout is the structure in the "oci-layout" file, found in the root +// of an OCI Image-layout directory. +type ImageLayout struct { + Version string `json:"imageLayoutVersion"` +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go new file mode 100644 index 00000000..7ff32c40 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go @@ -0,0 +1,32 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import "github.com/opencontainers/image-spec/specs-go" + +// Manifest provides `application/vnd.oci.image.manifest.v1+json` mediatype structure when marshalled to JSON. +type Manifest struct { + specs.Versioned + + // Config references a configuration object for a container, by digest. + // The referenced configuration object is a JSON blob that the runtime uses to set up the container. + Config Descriptor `json:"config"` + + // Layers is an indexed list of layers referenced by the manifest. + Layers []Descriptor `json:"layers"` + + // Annotations contains arbitrary metadata for the image manifest. + Annotations map[string]string `json:"annotations,omitempty"` +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go new file mode 100644 index 00000000..bad7bb97 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go @@ -0,0 +1,48 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +const ( + // MediaTypeDescriptor specifies the media type for a content descriptor. + MediaTypeDescriptor = "application/vnd.oci.descriptor.v1+json" + + // MediaTypeLayoutHeader specifies the media type for the oci-layout. + MediaTypeLayoutHeader = "application/vnd.oci.layout.header.v1+json" + + // MediaTypeImageManifest specifies the media type for an image manifest. + MediaTypeImageManifest = "application/vnd.oci.image.manifest.v1+json" + + // MediaTypeImageIndex specifies the media type for an image index. 
+	MediaTypeImageIndex = "application/vnd.oci.image.index.v1+json"
+
+	// MediaTypeImageLayer is the media type used for layers referenced by the manifest.
+	MediaTypeImageLayer = "application/vnd.oci.image.layer.v1.tar"
+
+	// MediaTypeImageLayerGzip is the media type used for gzipped layers
+	// referenced by the manifest.
+	MediaTypeImageLayerGzip = "application/vnd.oci.image.layer.v1.tar+gzip"
+
+	// MediaTypeImageLayerNonDistributable is the media type for layers referenced by
+	// the manifest but with distribution restrictions.
+	MediaTypeImageLayerNonDistributable = "application/vnd.oci.image.layer.nondistributable.v1.tar"
+
+	// MediaTypeImageLayerNonDistributableGzip is the media type for
+	// gzipped layers referenced by the manifest but with distribution
+	// restrictions.
+	MediaTypeImageLayerNonDistributableGzip = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip"
+
+	// MediaTypeImageConfig specifies the media type for the image configuration.
+	MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json"
+)
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/vendor/github.com/opencontainers/image-spec/specs-go/version.go
new file mode 100644
index 00000000..f4cda6ed
--- /dev/null
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/version.go
@@ -0,0 +1,32 @@
+// Copyright 2016 The Linux Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package specs
+
+import "fmt"
+
+const (
+	// VersionMajor is for API-incompatible changes
+	VersionMajor = 1
+	// VersionMinor is for new, backwards-compatible functionality
+	VersionMinor = 0
+	// VersionPatch is for backwards-compatible bug fixes
+	VersionPatch = 0
+
+	// VersionDev indicates the development branch; releases will be an empty string
+	VersionDev = "-rc6-dev"
+)
+
+// Version is the specification version that the package types support.
+var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev)
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go b/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go
new file mode 100644
index 00000000..58a1510f
--- /dev/null
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go
@@ -0,0 +1,23 @@
+// Copyright 2016 The Linux Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package specs
+
+// Versioned provides a struct with the manifest schemaVersion and mediaType.
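// [Editor's aside, not part of the vendored file: the struct below carries
// only SchemaVersion, so callers that also need the media type pair it with
// their own field when sniffing incoming content. A minimal sketch, using an
// illustrative anonymous struct, of how manifestStore.Get above routes on it:
//
//	var versioned struct {
//		specs.Versioned
//		MediaType string `json:"mediaType,omitempty"`
//	}
//	if err := json.Unmarshal(content, &versioned); err != nil {
//		return nil, err
//	}
//	// versioned.MediaType now selects the schema2, OCI, or list handler.
// ]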
+// Incoming content with unknown schema version can be decoded against this +// struct to check the version. +type Versioned struct { + // SchemaVersion is the image manifest schema that this image follows + SchemaVersion int `json:"schemaVersion"` +} From 426afb3a4c17a619273cfba8ad169b10418f5627 Mon Sep 17 00:00:00 2001 From: Mike Brown Date: Wed, 12 Jul 2017 18:07:46 -0500 Subject: [PATCH 104/276] address get manifest issue with oci. namespace; and comment descriptions Signed-off-by: Mike Brown --- manifest/ocischema/manifest.go | 2 +- manifest/schema2/manifest.go | 2 +- registry/handlers/manifests.go | 188 ++------------------------------- 3 files changed, 9 insertions(+), 183 deletions(-) diff --git a/manifest/ocischema/manifest.go b/manifest/ocischema/manifest.go index f1a05f0a..a5b199d4 100644 --- a/manifest/ocischema/manifest.go +++ b/manifest/ocischema/manifest.go @@ -61,7 +61,7 @@ func (m Manifest) References() []distribution.Descriptor { return references } -// Target returns the target of this signed manifest. +// Target returns the target of this manifest. func (m Manifest) Target() distribution.Descriptor { return m.Config } diff --git a/manifest/schema2/manifest.go b/manifest/schema2/manifest.go index a2708c75..26a6a334 100644 --- a/manifest/schema2/manifest.go +++ b/manifest/schema2/manifest.go @@ -79,7 +79,7 @@ func (m Manifest) References() []distribution.Descriptor { return references } -// Target returns the target of this signed manifest. +// Target returns the target of this manifest. func (m Manifest) Target() distribution.Descriptor { return m.Config } diff --git a/registry/handlers/manifests.go b/registry/handlers/manifests.go index 07fd6f8c..c6e17ada 100644 --- a/registry/handlers/manifests.go +++ b/registry/handlers/manifests.go @@ -111,13 +111,11 @@ func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request) } supportsOCI := supportsOCISchema || supportsOCIImageIndex - var manifest distribution.Manifest if imh.Tag != "" { tags := imh.Repository.Tags(imh) var desc distribution.Descriptor - if !supportsOCI { - desc, err = tags.Get(imh, imh.Tag) - } else { + desc, err = tags.Get(imh, imh.annotatedTag(supportsOCI)) + if err != nil && supportsOCI { // in the supportsOCI case fall back to the non OCI image desc, err = tags.Get(imh, imh.annotatedTag(false)) } if err != nil { @@ -140,7 +138,12 @@ func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request) if imh.Tag != "" { options = append(options, distribution.WithTag(imh.annotatedTag(supportsOCI))) } + var manifest distribution.Manifest manifest, err = manifests.Get(imh, imh.Digest, options...) + if err != nil && supportsOCI && imh.Tag != "" { // in the supportsOCI case fall back to the non OCI image + options = append(options[:0], distribution.WithTag(imh.annotatedTag(false))) + manifest, err = manifests.Get(imh, imh.Digest, options...) + } if err != nil { if _, ok := err.(distribution.ErrManifestUnknownRevision); ok { imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) @@ -238,183 +241,6 @@ func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request) w.Write(p) } -// GetImageManifest fetches the image manifest from the storage backend, if it exists. 
-func (imh *manifestHandler) GetImageManifest(w http.ResponseWriter, r *http.Request) { - fmt.Printf("\n\nGetting a manifest!\n\n\n") - supportsSchema2 := false - supportsManifestList := false - supportsOCISchema := false - supportsOCIImageIndex := false - - // this parsing of Accept headers is not quite as full-featured as godoc.org's parser, but we don't care about "q=" values - // https://github.com/golang/gddo/blob/e91d4165076d7474d20abda83f92d15c7ebc3e81/httputil/header/header.go#L165-L202 - for _, acceptHeader := range r.Header["Accept"] { - // r.Header[...] is a slice in case the request contains the same header more than once - // if the header isn't set, we'll get the zero value, which "range" will handle gracefully - - // we need to split each header value on "," to get the full list of "Accept" values (per RFC 2616) - // https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1 - for _, mediaType := range strings.Split(acceptHeader, ",") { - // remove "; q=..." if present - if i := strings.Index(mediaType, ";"); i >= 0 { - mediaType = mediaType[:i] - } - - // it's common (but not required) for Accept values to be space separated ("a/b, c/d, e/f") - mediaType = strings.TrimSpace(mediaType) - - if mediaType == schema2.MediaTypeManifest { - supportsSchema2 = true - } - if mediaType == manifestlist.MediaTypeManifestList { - supportsManifestList = true - } - if mediaType == v1.MediaTypeImageManifest { - supportsOCISchema = true - } - if mediaType == v1.MediaTypeImageIndex { - supportsOCIImageIndex = true - } - } - } - supportsOCI := supportsOCISchema || supportsOCIImageIndex - - ctxu.GetLogger(imh).Debug("GetImageManifest") - manifests, err := imh.Repository.Manifests(imh) - if err != nil { - imh.Errors = append(imh.Errors, err) - return - } - - var manifest distribution.Manifest - if imh.Tag != "" { - tags := imh.Repository.Tags(imh) - var desc distribution.Descriptor - if !supportsOCI { - desc, err = tags.Get(imh, imh.Tag) - if err != nil { - imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) - return - } - } else { - desc, err = tags.Get(imh, imh.annotatedTag(supportsOCI)) - if err != nil { - desc, err = tags.Get(imh, imh.annotatedTag(false)) - if err != nil { - imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) - return - } - } - } - imh.Digest = desc.Digest - } - - if etagMatch(r, imh.Digest.String()) { - w.WriteHeader(http.StatusNotModified) - return - } - - var options []distribution.ManifestServiceOption - if imh.Tag != "" { - options = append(options, distribution.WithTag(imh.annotatedTag(supportsOCI))) - } - manifest, err = manifests.Get(imh, imh.Digest, options...) 
- if err != nil { - imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) - return - } - - schema2Manifest, isSchema2 := manifest.(*schema2.DeserializedManifest) - manifestList, isManifestList := manifest.(*manifestlist.DeserializedManifestList) - isAnOCIManifest := isSchema2 && (schema2Manifest.MediaType == v1.MediaTypeImageManifest) - isAnOCIImageIndex := isManifestList && (manifestList.MediaType == v1.MediaTypeImageIndex) - - badCombinations := [][]bool{ - {isSchema2 && !isAnOCIManifest, supportsOCISchema && !supportsSchema2}, - {isManifestList && !isAnOCIImageIndex, supportsOCIImageIndex && !supportsManifestList}, - {isAnOCIManifest, !supportsOCISchema && supportsSchema2}, - {isAnOCIImageIndex, !supportsOCIImageIndex && supportsManifestList}, - } - for i, combo := range badCombinations { - if combo[0] && combo[1] { - fmt.Printf("\n\nbad combo! %d\n\n\n", i) - w.WriteHeader(http.StatusNotFound) - return - } - } - if isAnOCIManifest { - fmt.Print("\n\nreturning OCI manifest\n\n") - } else if isSchema2 { - fmt.Print("\n\nreturning schema 2 manifest\n\n") - } else { - fmt.Print("\n\nreturning schema 1 manifest\n\n") - } - - // Only rewrite schema2 manifests when they are being fetched by tag. - // If they are being fetched by digest, we can't return something not - // matching the digest. - if imh.Tag != "" && isSchema2 && !(supportsSchema2 || supportsOCISchema) { - // Rewrite manifest in schema1 format - dcontext.GetLogger(imh).Infof("rewriting manifest %s in schema1 format to support old client", imh.Digest.String()) - - manifest, err = imh.convertSchema2Manifest(schema2Manifest) - if err != nil { - return - } - } else if imh.Tag != "" && isManifestList && !(supportsManifestList || supportsOCIImageIndex) { - // Rewrite manifest in schema1 format - dcontext.GetLogger(imh).Infof("rewriting manifest list %s in schema1 format to support old client", imh.Digest.String()) - - // Find the image manifest corresponding to the default - // platform - var manifestDigest digest.Digest - for _, manifestDescriptor := range manifestList.Manifests { - if manifestDescriptor.Platform.Architecture == defaultArch && manifestDescriptor.Platform.OS == defaultOS { - manifestDigest = manifestDescriptor.Digest - break - } - } - - if manifestDigest == "" { - imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown) - return - } - - manifest, err = manifests.Get(imh, manifestDigest) - if err != nil { - if _, ok := err.(distribution.ErrManifestUnknownRevision); ok { - imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) - } else { - imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) - } - return - } - - // If necessary, convert the image manifest - if schema2Manifest, isSchema2 := manifest.(*schema2.DeserializedManifest); isSchema2 && !supportsSchema2 { - manifest, err = imh.convertSchema2Manifest(schema2Manifest) - if err != nil { - return - } - } else { - imh.Digest = manifestDigest - } - } - - ct, p, err := manifest.Payload() - if err != nil { - return - } - - w.Header().Set("Content-Type", ct) - w.Header().Set("Content-Length", fmt.Sprint(len(p))) - w.Header().Set("Docker-Content-Digest", imh.Digest.String()) - w.Header().Set("Etag", fmt.Sprintf(`"%s"`, imh.Digest)) - w.Write(p) - - fmt.Printf("\n\nSucceeded in getting the manifest!\n\n\n") -} - func (imh *manifestHandler) convertSchema2Manifest(schema2Manifest *schema2.DeserializedManifest) (distribution.Manifest, error) { targetDescriptor := schema2Manifest.Target() blobs := 
imh.Repository.Blobs(imh) From b0cef05626043df250d2ddc6ab84c87b6913b879 Mon Sep 17 00:00:00 2001 From: Mike Brown Date: Tue, 18 Jul 2017 16:20:37 -0500 Subject: [PATCH 105/276] removes oci. namespace feature Signed-off-by: Mike Brown --- registry/handlers/manifests.go | 38 ++++++++++------------------------ 1 file changed, 11 insertions(+), 27 deletions(-) diff --git a/registry/handlers/manifests.go b/registry/handlers/manifests.go index c6e17ada..ae52bff9 100644 --- a/registry/handlers/manifests.go +++ b/registry/handlers/manifests.go @@ -27,6 +27,7 @@ const ( defaultArch = "amd64" defaultOS = "linux" maxManifestBodySize = 4 << 20 + imageClass = "image" ) // manifestDispatcher takes the request context and builds the @@ -109,15 +110,11 @@ func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request) } } } - supportsOCI := supportsOCISchema || supportsOCIImageIndex if imh.Tag != "" { tags := imh.Repository.Tags(imh) var desc distribution.Descriptor - desc, err = tags.Get(imh, imh.annotatedTag(supportsOCI)) - if err != nil && supportsOCI { // in the supportsOCI case fall back to the non OCI image - desc, err = tags.Get(imh, imh.annotatedTag(false)) - } + desc, err = tags.Get(imh, imh.Tag) if err != nil { if _, ok := err.(distribution.ErrTagUnknown); ok { imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) @@ -136,14 +133,10 @@ func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request) var options []distribution.ManifestServiceOption if imh.Tag != "" { - options = append(options, distribution.WithTag(imh.annotatedTag(supportsOCI))) + options = append(options, distribution.WithTag(imh.Tag)) } var manifest distribution.Manifest manifest, err = manifests.Get(imh, imh.Digest, options...) - if err != nil && supportsOCI && imh.Tag != "" { // in the supportsOCI case fall back to the non OCI image - options = append(options[:0], distribution.WithTag(imh.annotatedTag(false))) - manifest, err = manifests.Get(imh, imh.Digest, options...) 
- } if err != nil { if _, ok := err.(distribution.ErrManifestUnknownRevision); ok { imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) @@ -266,7 +259,7 @@ func (imh *manifestHandler) convertSchema2Manifest(schema2Manifest *schema2.Dese builder := schema1.NewConfigManifestBuilder(imh.Repository.Blobs(imh), imh.Context.App.trustKey, ref, configJSON) for _, d := range schema2Manifest.Layers { - if err := builder.AppendReference(d); err != nil { + if err = builder.AppendReference(d); err != nil { imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) return nil, err } @@ -336,10 +329,10 @@ func (imh *manifestHandler) PutManifest(w http.ResponseWriter, r *http.Request) var options []distribution.ManifestServiceOption if imh.Tag != "" { - options = append(options, distribution.WithTag(imh.annotatedTag(isAnOCIManifest))) + options = append(options, distribution.WithTag(imh.Tag)) } - if err := imh.applyResourcePolicy(manifest); err != nil { + if err = imh.applyResourcePolicy(manifest); err != nil { imh.Errors = append(imh.Errors, err) return } @@ -388,7 +381,7 @@ func (imh *manifestHandler) PutManifest(w http.ResponseWriter, r *http.Request) // Tag this manifest if imh.Tag != "" { tags := imh.Repository.Tags(imh) - err = tags.Tag(imh, imh.annotatedTag(isAnOCIManifest), desc) + err = tags.Tag(imh, imh.Tag, desc) if err != nil { fmt.Printf("\n\nXXX 4: %T: %v\n\n\n", err, err) imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) @@ -431,11 +424,11 @@ func (imh *manifestHandler) applyResourcePolicy(manifest distribution.Manifest) var class string switch m := manifest.(type) { case *schema1.SignedManifest: - class = "image" + class = imageClass case *schema2.DeserializedManifest: switch m.Config.MediaType { case schema2.MediaTypeImageConfig: - class = "image" + class = imageClass case schema2.MediaTypePluginConfig: class = "plugin" default: @@ -445,7 +438,7 @@ func (imh *manifestHandler) applyResourcePolicy(manifest distribution.Manifest) case *ocischema.DeserializedManifest: switch m.Config.MediaType { case v1.MediaTypeImageConfig: - class = "image" + class = imageClass default: message := fmt.Sprintf("unknown manifest class for %s", m.Config.MediaType) return errcode.ErrorCodeDenied.WithMessage(message) @@ -476,7 +469,7 @@ func (imh *manifestHandler) applyResourcePolicy(manifest distribution.Manifest) for _, r := range resources { if r.Name == n { if r.Class == "" { - r.Class = "image" + r.Class = imageClass } if r.Class == class { return nil @@ -540,12 +533,3 @@ func (imh *manifestHandler) DeleteManifest(w http.ResponseWriter, r *http.Reques w.WriteHeader(http.StatusAccepted) } - -// annotatedTag will annotate OCI tags by prepending a string, and leave docker -// tags unmodified. -func (imh *manifestHandler) annotatedTag(oci bool) string { - if oci { - return "oci." 
+ imh.Tag - } - return imh.Tag -} From 9e3f78b8c84b21cb1f5774db6a34924209654f29 Mon Sep 17 00:00:00 2001 From: Mike Brown Date: Wed, 19 Jul 2017 13:29:10 -0500 Subject: [PATCH 106/276] addresses minor debug comments Signed-off-by: Mike Brown --- registry/handlers/manifests.go | 31 +++++++++----------------- registry/storage/ocimanifesthandler.go | 2 +- 2 files changed, 12 insertions(+), 21 deletions(-) diff --git a/registry/handlers/manifests.go b/registry/handlers/manifests.go index ae52bff9..2c6c7856 100644 --- a/registry/handlers/manifests.go +++ b/registry/handlers/manifests.go @@ -152,22 +152,22 @@ func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request) isAnOCIImageIndex := isManifestList && (manifestList.MediaType == v1.MediaTypeImageIndex) if (isSchema2 && !isAnOCIManifest) && (supportsOCISchema && !supportsSchema2) { - fmt.Printf("\n\nmanifest is schema2, but accept header only supports OCISchema \n\n") + ctxu.GetLogger(imh).Debug("manifest is schema2, but accept header only supports OCISchema") w.WriteHeader(http.StatusNotFound) return } if (isManifestList && !isAnOCIImageIndex) && (supportsOCIImageIndex && !supportsManifestList) { - fmt.Printf("\n\nmanifestlist is not OCI, but accept header only supports an OCI manifestlist\n\n") + ctxu.GetLogger(imh).Debug("manifestlist is not OCI, but accept header only supports an OCI manifestlist") w.WriteHeader(http.StatusNotFound) return } if isAnOCIManifest && (!supportsOCISchema && supportsSchema2) { - fmt.Printf("\n\nmanifest is OCI, but accept header only supports schema2\n\n") + ctxu.GetLogger(imh).Debug("manifest is OCI, but accept header only supports schema2") w.WriteHeader(http.StatusNotFound) return } if isAnOCIImageIndex && (!supportsOCIImageIndex && supportsManifestList) { - fmt.Printf("\n\nmanifestlist is OCI, but accept header only supports non-OCI manifestlists\n\n") + ctxu.GetLogger(imh).Debug("manifestlist is OCI, but accept header only supports non-OCI manifestlists") w.WriteHeader(http.StatusNotFound) return } @@ -322,9 +322,9 @@ func (imh *manifestHandler) PutManifest(w http.ResponseWriter, r *http.Request) isAnOCIManifest := mediaType == v1.MediaTypeImageManifest || mediaType == v1.MediaTypeImageIndex if isAnOCIManifest { - fmt.Printf("\n\nPutting an OCI Manifest!\n\n\n") + ctxu.GetLogger(imh).Debug("Putting an OCI Manifest!") } else { - fmt.Printf("\n\nPutting a Docker Manifest!\n\n\n") + ctxu.GetLogger(imh).Debug("Putting a Docker Manifest!") } var options []distribution.ManifestServiceOption @@ -342,12 +342,10 @@ func (imh *manifestHandler) PutManifest(w http.ResponseWriter, r *http.Request) // TODO(stevvooe): These error handling switches really need to be // handled by an app global mapper. 
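The thrust of this patch is visible in the hunks above: ad-hoc fmt.Printf debugging is replaced with the request-scoped context logger, so debug output honors the configured log level and formatter and carries the request's fields. A minimal sketch of that pattern, assuming a logrus-backed context logger along the lines of distribution's context package (the key type and helper below are ours, not the ctxu/dcontext API):

```go
package main

import (
	"context"

	"github.com/sirupsen/logrus"
)

// loggerKey is an unexported context key, per the usual context.Value convention.
type loggerKey struct{}

// getLogger pulls the request-scoped logger out of the context, falling back
// to the process-wide standard logger when none was attached.
func getLogger(ctx context.Context) *logrus.Entry {
	if entry, ok := ctx.Value(loggerKey{}).(*logrus.Entry); ok {
		return entry
	}
	return logrus.NewEntry(logrus.StandardLogger())
}

func main() {
	logrus.SetLevel(logrus.DebugLevel)
	ctx := context.WithValue(context.Background(),
		loggerKey{}, logrus.WithField("request.id", "abc123"))

	// Instead of fmt.Printf("\n\nPutting an OCI Manifest!\n\n\n"), the record
	// is leveled, structured, and tagged with the request fields.
	getLogger(ctx).Debug("Putting an OCI Manifest!")
}
```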
if err == distribution.ErrUnsupported { - fmt.Printf("\n\nXXX 1\n\n\n") imh.Errors = append(imh.Errors, errcode.ErrorCodeUnsupported) return } if err == distribution.ErrAccessDenied { - fmt.Printf("\n\nXXX 2\n\n\n") imh.Errors = append(imh.Errors, errcode.ErrorCodeDenied) return } @@ -374,7 +372,6 @@ func (imh *manifestHandler) PutManifest(w http.ResponseWriter, r *http.Request) default: imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) } - fmt.Printf("\n\nXXX 3\n\n\n") return } @@ -383,7 +380,6 @@ func (imh *manifestHandler) PutManifest(w http.ResponseWriter, r *http.Request) tags := imh.Repository.Tags(imh) err = tags.Tag(imh, imh.Tag, desc) if err != nil { - fmt.Printf("\n\nXXX 4: %T: %v\n\n\n", err, err) imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) return } @@ -393,7 +389,6 @@ func (imh *manifestHandler) PutManifest(w http.ResponseWriter, r *http.Request) // Construct a canonical url for the uploaded manifest. ref, err := reference.WithDigest(imh.Repository.Named(), imh.Digest) if err != nil { - fmt.Printf("\n\nXXX 5\n\n\n") imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) return } @@ -410,7 +405,7 @@ func (imh *manifestHandler) PutManifest(w http.ResponseWriter, r *http.Request) w.Header().Set("Docker-Content-Digest", imh.Digest.String()) w.WriteHeader(http.StatusCreated) - fmt.Printf("\n\nSucceeded in putting manifest!\n\n\n") + ctxu.GetLogger(imh).Debug("Succeeded in putting manifest!") } // applyResourcePolicy checks whether the resource class matches what has @@ -432,16 +427,14 @@ func (imh *manifestHandler) applyResourcePolicy(manifest distribution.Manifest) case schema2.MediaTypePluginConfig: class = "plugin" default: - message := fmt.Sprintf("unknown manifest class for %s", m.Config.MediaType) - return errcode.ErrorCodeDenied.WithMessage(message) + return errcode.ErrorCodeDenied.WithMessage("unknown manifest class for " + m.Config.MediaType) } case *ocischema.DeserializedManifest: switch m.Config.MediaType { case v1.MediaTypeImageConfig: class = imageClass default: - message := fmt.Sprintf("unknown manifest class for %s", m.Config.MediaType) - return errcode.ErrorCodeDenied.WithMessage(message) + return errcode.ErrorCodeDenied.WithMessage("unknown manifest class for " + m.Config.MediaType) } } @@ -458,8 +451,7 @@ func (imh *manifestHandler) applyResourcePolicy(manifest distribution.Manifest) } } if !allowedClass { - message := fmt.Sprintf("registry does not allow %s manifest", class) - return errcode.ErrorCodeDenied.WithMessage(message) + return errcode.ErrorCodeDenied.WithMessage(fmt.Sprintf("registry does not allow %s manifest", class)) } resources := auth.AuthorizedResources(imh) @@ -480,8 +472,7 @@ func (imh *manifestHandler) applyResourcePolicy(manifest distribution.Manifest) // resource was found but no matching class was found if foundResource { - message := fmt.Sprintf("repository not authorized for %s manifest", class) - return errcode.ErrorCodeDenied.WithMessage(message) + return errcode.ErrorCodeDenied.WithMessage(fmt.Sprintf("repository not authorized for %s manifest", class)) } return nil diff --git a/registry/storage/ocimanifesthandler.go b/registry/storage/ocimanifesthandler.go index 148b7a2e..0efef56b 100644 --- a/registry/storage/ocimanifesthandler.go +++ b/registry/storage/ocimanifesthandler.go @@ -83,7 +83,7 @@ func (ms *ocischemaManifestHandler) verifyManifest(ctx context.Context, mnfst oc // TODO: mikebrow/steveoe verify we should treat oci nondistributable like foreign layers? 
case v1.MediaTypeImageLayerNonDistributable, v1.MediaTypeImageLayerNonDistributableGzip: // Clients download this layer from an external URL, so do not check for - // its presense. + // its presence. if len(descriptor.URLs) == 0 { err = errMissingURL } From 6bae7ca5976ac55d8890a12c19e286ddc58a7719 Mon Sep 17 00:00:00 2001 From: Mike Brown Date: Thu, 20 Jul 2017 12:48:46 -0500 Subject: [PATCH 107/276] refactor adding enum for storage types Signed-off-by: Mike Brown --- manifest/manifestlist/manifestlist_test.go | 1 + manifest/ocischema/builder_test.go | 1 + manifest/ocischema/manifest.go | 1 - manifest/ocischema/manifest_test.go | 1 + registry/handlers/manifests.go | 67 ++++++++++++---------- registry/storage/driver/storagedriver.go | 2 +- 6 files changed, 41 insertions(+), 32 deletions(-) diff --git a/manifest/manifestlist/manifestlist_test.go b/manifest/manifestlist/manifestlist_test.go index 5ab328dd..6909895b 100644 --- a/manifest/manifestlist/manifestlist_test.go +++ b/manifest/manifestlist/manifestlist_test.go @@ -111,6 +111,7 @@ func TestManifestList(t *testing.T) { } } +// TODO (mikebrow): add annotations on the index and individual manifests var expectedOCIImageIndexSerialization = []byte(`{ "schemaVersion": 2, "mediaType": "application/vnd.oci.image.index.v1+json", diff --git a/manifest/ocischema/builder_test.go b/manifest/ocischema/builder_test.go index 33e9164f..ca785495 100644 --- a/manifest/ocischema/builder_test.go +++ b/manifest/ocischema/builder_test.go @@ -10,6 +10,7 @@ import ( "github.com/opencontainers/image-spec/specs-go/v1" ) +// TODO (not assigned): consider making mockBlobService common for ocischema, schema2, and schema1 type mockBlobService struct { descriptors map[digest.Digest]distribution.Descriptor } diff --git a/manifest/ocischema/manifest.go b/manifest/ocischema/manifest.go index a5b199d4..c3d9046a 100644 --- a/manifest/ocischema/manifest.go +++ b/manifest/ocischema/manifest.go @@ -57,7 +57,6 @@ func (m Manifest) References() []distribution.Descriptor { references := make([]distribution.Descriptor, 0, 1+len(m.Layers)) references = append(references, m.Config) references = append(references, m.Layers...) - // TODO: (mikebrow/stevvooe) should we return annotations as references return references } diff --git a/manifest/ocischema/manifest_test.go b/manifest/ocischema/manifest_test.go index 55dc0306..714c50a5 100644 --- a/manifest/ocischema/manifest_test.go +++ b/manifest/ocischema/manifest_test.go @@ -10,6 +10,7 @@ import ( "github.com/opencontainers/image-spec/specs-go/v1" ) +// TODO (mikebrow): add annotations to the test var expectedManifestSerialization = []byte(`{ "schemaVersion": 2, "mediaType": "application/vnd.oci.image.manifest.v1+json", diff --git a/registry/handlers/manifests.go b/registry/handlers/manifests.go index 2c6c7856..ad39ccb5 100644 --- a/registry/handlers/manifests.go +++ b/registry/handlers/manifests.go @@ -30,6 +30,17 @@ const ( imageClass = "image" ) +type storageType int + +const ( + manifestSchema1 storageType = iota // 0 + manifestSchema2 // 1 + manifestlistSchema // 2 + ociSchema // 3 + ociImageIndexSchema // 4 + numStorageTypes // 5 +) + // manifestDispatcher takes the request context and builds the // appropriate handler for handling manifest requests. 
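The storageType enum introduced above leans on a small Go idiom worth spelling out: iota numbers the constants from zero, and the trailing numStorageTypes sentinel equals the number of real values, so it can size a lookup array. That lets one fixed-size array replace the four independent supportsX booleans, as the next hunk does with `var supports [numStorageTypes]bool`. A standalone sketch (the main harness and the sample Accept values are ours):

```go
package main

import "fmt"

type storageType int

const (
	manifestSchema1 storageType = iota // 0
	manifestSchema2                    // 1
	manifestlistSchema                 // 2
	ociSchema                          // 3
	ociImageIndexSchema                // 4
	numStorageTypes                    // 5, sentinel: always last
)

func main() {
	// One fixed-size array replaces several independent booleans.
	var supports [numStorageTypes]bool

	// Pretend the Accept header advertised these two media types.
	supports[manifestSchema2] = true
	supports[ociImageIndexSchema] = true

	for t := manifestSchema1; t < numStorageTypes; t++ {
		fmt.Printf("storage type %d supported: %v\n", t, supports[t])
	}
}
```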
func manifestDispatcher(ctx *Context, r *http.Request) http.Handler { @@ -75,10 +86,8 @@ func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request) imh.Errors = append(imh.Errors, err) return } - supportsSchema2 := false - supportsManifestList := false - supportsOCISchema := false - supportsOCIImageIndex := false + var supports [numStorageTypes]bool + // this parsing of Accept headers is not quite as full-featured as godoc.org's parser, but we don't care about "q=" values // https://github.com/golang/gddo/blob/e91d4165076d7474d20abda83f92d15c7ebc3e81/httputil/header/header.go#L165-L202 for _, acceptHeader := range r.Header["Accept"] { @@ -97,16 +106,16 @@ func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request) mediaType = strings.TrimSpace(mediaType) if mediaType == schema2.MediaTypeManifest { - supportsSchema2 = true + supports[manifestSchema2] = true } if mediaType == manifestlist.MediaTypeManifestList { - supportsManifestList = true + supports[manifestlistSchema] = true } if mediaType == v1.MediaTypeImageManifest { - supportsOCISchema = true + supports[ociSchema] = true } if mediaType == v1.MediaTypeImageIndex { - supportsOCIImageIndex = true + supports[ociImageIndexSchema] = true } } } @@ -145,36 +154,34 @@ func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request) } return } - + // determine the type of the returned manifest + manifestType := manifestSchema1 schema2Manifest, isSchema2 := manifest.(*schema2.DeserializedManifest) manifestList, isManifestList := manifest.(*manifestlist.DeserializedManifestList) - isAnOCIManifest := isSchema2 && (schema2Manifest.MediaType == v1.MediaTypeImageManifest) - isAnOCIImageIndex := isManifestList && (manifestList.MediaType == v1.MediaTypeImageIndex) + if isSchema2 { + manifestType = manifestSchema2 + } else if _, isOCImanifest := manifest.(*ocischema.DeserializedManifest); isOCImanifest { + manifestType = ociSchema + } else if isManifestList { + if manifestList.MediaType == manifestlist.MediaTypeManifestList { + manifestType = manifestlistSchema + } else if manifestList.MediaType == v1.MediaTypeImageIndex { + manifestType = ociImageIndexSchema + } + } - if (isSchema2 && !isAnOCIManifest) && (supportsOCISchema && !supportsSchema2) { - ctxu.GetLogger(imh).Debug("manifest is schema2, but accept header only supports OCISchema") - w.WriteHeader(http.StatusNotFound) + if manifestType == ociSchema && !supports[ociSchema] { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithMessage("OCI manifest found, but accept header does not support OCI manifests")) return } - if (isManifestList && !isAnOCIImageIndex) && (supportsOCIImageIndex && !supportsManifestList) { - ctxu.GetLogger(imh).Debug("manifestlist is not OCI, but accept header only supports an OCI manifestlist") - w.WriteHeader(http.StatusNotFound) - return - } - if isAnOCIManifest && (!supportsOCISchema && supportsSchema2) { - ctxu.GetLogger(imh).Debug("manifest is OCI, but accept header only supports schema2") - w.WriteHeader(http.StatusNotFound) - return - } - if isAnOCIImageIndex && (!supportsOCIImageIndex && supportsManifestList) { - ctxu.GetLogger(imh).Debug("manifestlist is OCI, but accept header only supports non-OCI manifestlists") - w.WriteHeader(http.StatusNotFound) + if manifestType == ociImageIndexSchema && !supports[ociImageIndexSchema] { + imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithMessage("OCI index found, but accept header does not support OCI indexes")) return } // Only rewrite 
schema2 manifests when they are being fetched by tag. // If they are being fetched by digest, we can't return something not // matching the digest. - if imh.Tag != "" && isSchema2 && !(supportsSchema2 || supportsOCISchema) { + if imh.Tag != "" && manifestType == manifestSchema2 && !supports[manifestSchema2] { // Rewrite manifest in schema1 format ctxu.GetLogger(imh).Infof("rewriting manifest %s in schema1 format to support old client", imh.Digest.String()) @@ -182,7 +189,7 @@ func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request) if err != nil { return } - } else if imh.Tag != "" && isManifestList && !(supportsManifestList || supportsOCIImageIndex) { + } else if imh.Tag != "" && manifestType == manifestlistSchema && !supports[manifestlistSchema] { // Rewrite manifest in schema1 format ctxu.GetLogger(imh).Infof("rewriting manifest list %s in schema1 format to support old client", imh.Digest.String()) @@ -212,7 +219,7 @@ func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request) } // If necessary, convert the image manifest - if schema2Manifest, isSchema2 := manifest.(*schema2.DeserializedManifest); isSchema2 && !(supportsSchema2 || supportsOCISchema) { + if schema2Manifest, isSchema2 := manifest.(*schema2.DeserializedManifest); isSchema2 && !supports[manifestSchema2] { manifest, err = imh.convertSchema2Manifest(schema2Manifest) if err != nil { return diff --git a/registry/storage/driver/storagedriver.go b/registry/storage/driver/storagedriver.go index 46c1b9e0..b220713f 100644 --- a/registry/storage/driver/storagedriver.go +++ b/registry/storage/driver/storagedriver.go @@ -116,7 +116,7 @@ type FileWriter interface { // number of path components separated by slashes, where each component is // restricted to alphanumeric characters or a period, underscore, or // hyphen. -var PathRegexp = regexp.MustCompile(`^(/[A-Za-z0-9._:-]+)+$`) +var PathRegexp = regexp.MustCompile(`^(/[A-Za-z0-9._-]+)+$`) // ErrUnsupportedMethod may be returned in the case where a StorageDriver implementation does not support an optional method. type ErrUnsupportedMethod struct { From c1532332ad7fa88b692af45658e003e71a5a075f Mon Sep 17 00:00:00 2001 From: Mike Brown Date: Thu, 20 Jul 2017 15:01:25 -0500 Subject: [PATCH 108/276] update to image spec v1.0.0 Signed-off-by: Mike Brown --- vendor.conf | 2 +- vendor/github.com/opencontainers/image-spec/specs-go/version.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/vendor.conf b/vendor.conf index 4f52b597..df06259e 100644 --- a/vendor.conf +++ b/vendor.conf @@ -49,4 +49,4 @@ gopkg.in/square/go-jose.v1 40d457b439244b546f023d056628e5184136899b gopkg.in/yaml.v2 bef53efd0c76e49e6de55ead051f886bea7e9420 rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb -github.com/opencontainers/image-spec e0536ae0cb47b8fed6afdaa9ba2589f6bd915caa +github.com/opencontainers/image-spec ab7389ef9f50030c9b245bc16b981c7ddf192882 diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/vendor/github.com/opencontainers/image-spec/specs-go/version.go index f4cda6ed..e3eee29b 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/version.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/version.go @@ -25,7 +25,7 @@ const ( VersionPatch = 0 // VersionDev indicates development branch. Releases will be empty string. 
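An easy-to-miss behavioral change in patch 107 above: the storagedriver.go hunk tightens PathRegexp from `[A-Za-z0-9._:-]` to `[A-Za-z0-9._-]`, so ':' is no longer legal in a storage path component, which brings the pattern in line with its doc comment ("a period, underscore, or hyphen"). A quick check of what the new pattern accepts (the sample paths are ours):

```go
package main

import (
	"fmt"
	"regexp"
)

// Same pattern as the updated PathRegexp: one or more '/'-separated
// components of alphanumerics, '.', '_' and '-'.
var pathRegexp = regexp.MustCompile(`^(/[A-Za-z0-9._-]+)+$`)

func main() {
	for _, p := range []string{
		"/docker/registry/v2/blobs", // true
		"/repo/sha256:deadbeef",     // false: ':' is no longer allowed
		"/trailing/slash/",          // false: empty final component
	} {
		fmt.Printf("%-28s %v\n", p, pathRegexp.MatchString(p))
	}
}
```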
- VersionDev = "-rc6-dev" + VersionDev = "" ) // Version is the specification version that the package types support. From ec2aa05cdfa5b2318554841818f7b18d5be4582a Mon Sep 17 00:00:00 2001 From: Mike Brown Date: Thu, 20 Jul 2017 20:44:02 -0500 Subject: [PATCH 109/276] addressing comments from stevvooe Signed-off-by: Mike Brown --- manifest/ocischema/builder_test.go | 150 +++++++------------- manifest/ocischema/manifest.go | 2 +- manifest/ocischema/manifest_test.go | 30 ++-- registry/handlers/manifests.go | 10 +- registry/storage/ocimanifesthandler.go | 18 --- registry/storage/ocimanifesthandler_test.go | 20 +-- 6 files changed, 86 insertions(+), 144 deletions(-) diff --git a/manifest/ocischema/builder_test.go b/manifest/ocischema/builder_test.go index ca785495..27562cfd 100644 --- a/manifest/ocischema/builder_test.go +++ b/manifest/ocischema/builder_test.go @@ -10,7 +10,6 @@ import ( "github.com/opencontainers/image-spec/specs-go/v1" ) -// TODO (not assigned): consider making mockBlobService common for ocischema, schema2, and schema1 type mockBlobService struct { descriptors map[digest.Digest]distribution.Descriptor } @@ -50,110 +49,65 @@ func (bs *mockBlobService) Resume(ctx context.Context, id string) (distribution. func TestBuilder(t *testing.T) { imgJSON := []byte(`{ + "created": "2015-10-31T22:22:56.015925234Z", + "author": "Alyssa P. Hacker ", "architecture": "amd64", - "config": { - "AttachStderr": false, - "AttachStdin": false, - "AttachStdout": false, - "Cmd": [ - "/bin/sh", - "-c", - "echo hi" - ], - "Domainname": "", - "Entrypoint": null, - "Env": [ - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", - "derived=true", - "asdf=true" - ], - "Hostname": "23304fc829f9", - "Image": "sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246", - "Labels": {}, - "OnBuild": [], - "OpenStdin": false, - "StdinOnce": false, - "Tty": false, - "User": "", - "Volumes": null, - "WorkingDir": "" - }, - "container": "e91032eb0403a61bfe085ff5a5a48e3659e5a6deae9f4d678daa2ae399d5a001", - "container_config": { - "AttachStderr": false, - "AttachStdin": false, - "AttachStdout": false, - "Cmd": [ - "/bin/sh", - "-c", - "#(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]" - ], - "Domainname": "", - "Entrypoint": null, - "Env": [ - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", - "derived=true", - "asdf=true" - ], - "Hostname": "23304fc829f9", - "Image": "sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246", - "Labels": {}, - "OnBuild": [], - "OpenStdin": false, - "StdinOnce": false, - "Tty": false, - "User": "", - "Volumes": null, - "WorkingDir": "" - }, - "created": "2015-11-04T23:06:32.365666163Z", - "docker_version": "1.9.0-dev", - "history": [ - { - "created": "2015-10-31T22:22:54.690851953Z", - "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /" - }, - { - "created": "2015-10-31T22:22:55.613815829Z", - "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]" - }, - { - "created": "2015-11-04T23:06:30.934316144Z", - "created_by": "/bin/sh -c #(nop) ENV derived=true", - "empty_layer": true - }, - { - "created": "2015-11-04T23:06:31.192097572Z", - "created_by": "/bin/sh -c #(nop) ENV asdf=true", - "empty_layer": true - }, - { - "created": "2015-11-04T23:06:32.083868454Z", - "created_by": "/bin/sh -c dd if=/dev/zero of=/file bs=1024 count=1024" - }, - { - "created": "2015-11-04T23:06:32.365666163Z", - "created_by": "/bin/sh -c #(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]", - 
"empty_layer": true - } - ], "os": "linux", - "rootfs": { - "diff_ids": [ - "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1", - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef", - "sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49" + "config": { + "User": "alice", + "ExposedPorts": { + "8080/tcp": {} + }, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "FOO=oci_is_a", + "BAR=well_written_spec" ], - "type": "layers" - } + "Entrypoint": [ + "/bin/my-app-binary" + ], + "Cmd": [ + "--foreground", + "--config", + "/etc/my-app.d/default.cfg" + ], + "Volumes": { + "/var/job-result-data": {}, + "/var/log/my-app-logs": {} + }, + "WorkingDir": "/home/alice", + "Labels": { + "com.example.project.git.url": "https://example.com/project.git", + "com.example.project.git.commit": "45a939b2999782a3f005621a8d0f29aa387e1d6b" + } + }, + "rootfs": { + "diff_ids": [ + "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1", + "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + ], + "type": "layers" + }, + "history": [ + { + "created": "2015-10-31T22:22:54.690851953Z", + "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /" + }, + { + "created": "2015-10-31T22:22:55.613815829Z", + "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]", + "empty_layer": true + } + ] }`) configDigest := digest.FromBytes(imgJSON) descriptors := []distribution.Descriptor{ { - Digest: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"), - Size: 5312, - MediaType: v1.MediaTypeImageLayerGzip, + Digest: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"), + Size: 5312, + MediaType: v1.MediaTypeImageLayerGzip, + Annotations: map[string]string{"apple": "orange", "lettuce": "wrap"}, }, { Digest: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"), @@ -200,7 +154,7 @@ func TestBuilder(t *testing.T) { if target.MediaType != v1.MediaTypeImageConfig { t.Fatalf("unexpected media type in target: %s", target.MediaType) } - if target.Size != 3153 { + if target.Size != 1582 { t.Fatalf("unexpected size in target: %d", target.Size) } diff --git a/manifest/ocischema/manifest.go b/manifest/ocischema/manifest.go index c3d9046a..afab12bd 100644 --- a/manifest/ocischema/manifest.go +++ b/manifest/ocischema/manifest.go @@ -15,7 +15,7 @@ var ( // SchemaVersion provides a pre-initialized version structure for this // packages version of the manifest. SchemaVersion = manifest.Versioned{ - SchemaVersion: 2, // TODO: (mikebrow/stevvooe) this could be confusing cause oci version 1 is closer to docker 2 than 1 + SchemaVersion: 2, // historical value here.. 
does not pertain to OCI or docker version MediaType: v1.MediaTypeImageManifest, } ) diff --git a/manifest/ocischema/manifest_test.go b/manifest/ocischema/manifest_test.go index 714c50a5..c37fe8fb 100644 --- a/manifest/ocischema/manifest_test.go +++ b/manifest/ocischema/manifest_test.go @@ -17,13 +17,19 @@ var expectedManifestSerialization = []byte(`{ "config": { "mediaType": "application/vnd.oci.image.config.v1+json", "size": 985, - "digest": "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b" + "digest": "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", + "annotations": { + "apple": "orange" + } }, "layers": [ { "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", "size": 153263, - "digest": "sha256:62d8908bee94c202b2d35224a221aaa2058318bfa9879fa541efaecba272331b" + "digest": "sha256:62d8908bee94c202b2d35224a221aaa2058318bfa9879fa541efaecba272331b", + "annotations": { + "lettuce": "wrap" + } } ] }`) @@ -32,15 +38,17 @@ func TestManifest(t *testing.T) { manifest := Manifest{ Versioned: SchemaVersion, Config: distribution.Descriptor{ - Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", - Size: 985, - MediaType: v1.MediaTypeImageConfig, + Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", + Size: 985, + MediaType: v1.MediaTypeImageConfig, + Annotations: map[string]string{"apple": "orange"}, }, Layers: []distribution.Descriptor{ { - Digest: "sha256:62d8908bee94c202b2d35224a221aaa2058318bfa9879fa541efaecba272331b", - Size: 153263, - MediaType: v1.MediaTypeImageLayerGzip, + Digest: "sha256:62d8908bee94c202b2d35224a221aaa2058318bfa9879fa541efaecba272331b", + Size: 153263, + MediaType: v1.MediaTypeImageLayerGzip, + Annotations: map[string]string{"lettuce": "wrap"}, }, }, } @@ -90,6 +98,9 @@ func TestManifest(t *testing.T) { if target.Size != 985 { t.Fatalf("unexpected size in target: %d", target.Size) } + if target.Annotations["apple"] != "orange" { + t.Fatalf("unexpected annotation in target: %s", target.Annotations["apple"]) + } references := deserialized.References() if len(references) != 2 { @@ -110,4 +121,7 @@ func TestManifest(t *testing.T) { if references[1].Size != 153263 { t.Fatalf("unexpected size in reference: %d", references[0].Size) } + if references[1].Annotations["lettuce"] != "wrap" { + t.Fatalf("unexpected annotation in reference: %s", references[1].Annotations["lettuce"]) + } } diff --git a/registry/handlers/manifests.go b/registry/handlers/manifests.go index ad39ccb5..53c7c807 100644 --- a/registry/handlers/manifests.go +++ b/registry/handlers/manifests.go @@ -122,8 +122,7 @@ func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request) if imh.Tag != "" { tags := imh.Repository.Tags(imh) - var desc distribution.Descriptor - desc, err = tags.Get(imh, imh.Tag) + desc, err := tags.Get(imh, imh.Tag) if err != nil { if _, ok := err.(distribution.ErrTagUnknown); ok { imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) @@ -144,8 +143,7 @@ func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request) if imh.Tag != "" { options = append(options, distribution.WithTag(imh.Tag)) } - var manifest distribution.Manifest - manifest, err = manifests.Get(imh, imh.Digest, options...) + manifest, err := manifests.Get(imh, imh.Digest, options...) 
if err != nil { if _, ok := err.(distribution.ErrManifestUnknownRevision); ok { imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) @@ -266,7 +264,7 @@ func (imh *manifestHandler) convertSchema2Manifest(schema2Manifest *schema2.Dese builder := schema1.NewConfigManifestBuilder(imh.Repository.Blobs(imh), imh.Context.App.trustKey, ref, configJSON) for _, d := range schema2Manifest.Layers { - if err = builder.AppendReference(d); err != nil { + if err := builder.AppendReference(d); err != nil { imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) return nil, err } @@ -339,7 +337,7 @@ func (imh *manifestHandler) PutManifest(w http.ResponseWriter, r *http.Request) options = append(options, distribution.WithTag(imh.Tag)) } - if err = imh.applyResourcePolicy(manifest); err != nil { + if err := imh.applyResourcePolicy(manifest); err != nil { imh.Errors = append(imh.Errors, err) return } diff --git a/registry/storage/ocimanifesthandler.go b/registry/storage/ocimanifesthandler.go index 0efef56b..c61beb4b 100644 --- a/registry/storage/ocimanifesthandler.go +++ b/registry/storage/ocimanifesthandler.go @@ -3,7 +3,6 @@ package storage import ( "encoding/json" "fmt" - "net/url" "github.com/docker/distribution" "github.com/docker/distribution/context" @@ -80,23 +79,6 @@ func (ms *ocischemaManifestHandler) verifyManifest(ctx context.Context, mnfst oc var err error switch descriptor.MediaType { - // TODO: mikebrow/steveoe verify we should treat oci nondistributable like foreign layers? - case v1.MediaTypeImageLayerNonDistributable, v1.MediaTypeImageLayerNonDistributableGzip: - // Clients download this layer from an external URL, so do not check for - // its presence. - if len(descriptor.URLs) == 0 { - err = errMissingURL - } - allow := ms.manifestURLs.allow - deny := ms.manifestURLs.deny - for _, u := range descriptor.URLs { - var pu *url.URL - pu, err = url.Parse(u) - if err != nil || (pu.Scheme != "http" && pu.Scheme != "https") || pu.Fragment != "" || (allow != nil && !allow.MatchString(u)) || (deny != nil && deny.MatchString(u)) { - err = errInvalidURL - break - } - } case v1.MediaTypeImageManifest: var exists bool exists, err = manifestService.Exists(ctx, descriptor.Digest) diff --git a/registry/storage/ocimanifesthandler_test.go b/registry/storage/ocimanifesthandler_test.go index d04ce531..047f1c3a 100644 --- a/registry/storage/ocimanifesthandler_test.go +++ b/registry/storage/ocimanifesthandler_test.go @@ -53,12 +53,6 @@ func TestVerifyOCIManifestNonDistributableLayer(t *testing.T) { cases := []testcase{ { - nonDistributableLayer, - nil, - errMissingURL, - }, - { - // regular layers may have foreign urls (non-Distributable Layers) layer, []string{"http://foo/bar"}, nil, @@ -66,37 +60,37 @@ func TestVerifyOCIManifestNonDistributableLayer(t *testing.T) { { nonDistributableLayer, []string{"file:///local/file"}, - errInvalidURL, + nil, }, { nonDistributableLayer, []string{"http://foo/bar#baz"}, - errInvalidURL, + nil, }, { nonDistributableLayer, []string{""}, - errInvalidURL, + nil, }, { nonDistributableLayer, []string{"https://foo/bar", ""}, - errInvalidURL, + nil, }, { nonDistributableLayer, []string{"", "https://foo/bar"}, - errInvalidURL, + nil, }, { nonDistributableLayer, []string{"http://nope/bar"}, - errInvalidURL, + nil, }, { nonDistributableLayer, []string{"http://foo/nope"}, - errInvalidURL, + nil, }, { nonDistributableLayer, From f186e1da1cefaf5229f2d60d50b536078012e993 Mon Sep 17 00:00:00 2001 From: Mike Brown Date: Mon, 31 Jul 2017 18:34:11 
-0500 Subject: [PATCH 110/276] adding some support for annotations at the manifest level Signed-off-by: Mike Brown --- manifest/ocischema/builder.go | 17 +++++++++++------ manifest/ocischema/builder_test.go | 11 +++++++++-- manifest/ocischema/manifest_test.go | 10 ++++++++-- 3 files changed, 28 insertions(+), 10 deletions(-) diff --git a/manifest/ocischema/builder.go b/manifest/ocischema/builder.go index a6d6633a..c10376f8 100644 --- a/manifest/ocischema/builder.go +++ b/manifest/ocischema/builder.go @@ -18,15 +18,19 @@ type builder struct { // layers is a list of layer descriptors that gets built by successive // calls to AppendReference. layers []distribution.Descriptor + + // Annotations contains arbitrary metadata relating to the targeted content. + annotations map[string]string } // NewManifestBuilder is used to build new manifests for the current schema // version. It takes a BlobService so it can publish the configuration blob -// as part of the Build process. -func NewManifestBuilder(bs distribution.BlobService, configJSON []byte) distribution.ManifestBuilder { +// as part of the Build process, and annotations. +func NewManifestBuilder(bs distribution.BlobService, configJSON []byte, annotations map[string]string) distribution.ManifestBuilder { mb := &builder{ - bs: bs, - configJSON: make([]byte, len(configJSON)), + bs: bs, + configJSON: make([]byte, len(configJSON)), + annotations: annotations, } copy(mb.configJSON, configJSON) @@ -36,8 +40,9 @@ func NewManifestBuilder(bs distribution.BlobService, configJSON []byte) distribu // Build produces a final manifest from the given references. func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) { m := Manifest{ - Versioned: SchemaVersion, - Layers: make([]distribution.Descriptor, len(mb.layers)), + Versioned: SchemaVersion, + Layers: make([]distribution.Descriptor, len(mb.layers)), + Annotations: mb.annotations, } copy(m.Layers, mb.layers) diff --git a/manifest/ocischema/builder_test.go b/manifest/ocischema/builder_test.go index 27562cfd..557681ba 100644 --- a/manifest/ocischema/builder_test.go +++ b/manifest/ocischema/builder_test.go @@ -88,6 +88,9 @@ func TestBuilder(t *testing.T) { ], "type": "layers" }, + "annotations": { + "hot": "potato" + } "history": [ { "created": "2015-10-31T22:22:54.690851953Z", @@ -120,9 +123,10 @@ func TestBuilder(t *testing.T) { MediaType: v1.MediaTypeImageLayerGzip, }, } + annotations := map[string]string{"hot": "potato"} bs := &mockBlobService{descriptors: make(map[digest.Digest]distribution.Descriptor)} - builder := NewManifestBuilder(bs, imgJSON) + builder := NewManifestBuilder(bs, imgJSON, annotations) for _, d := range descriptors { if err := builder.AppendReference(d); err != nil { @@ -142,6 +146,9 @@ func TestBuilder(t *testing.T) { } manifest := built.(*DeserializedManifest).Manifest + if manifest.Annotations["hot"] != "potato" { + t.Fatalf("unexpected annotation in manifest: %s", manifest.Annotations["hot"]) + } if manifest.Versioned.SchemaVersion != 2 { t.Fatal("SchemaVersion != 2") @@ -154,7 +161,7 @@ func TestBuilder(t *testing.T) { if target.MediaType != v1.MediaTypeImageConfig { t.Fatalf("unexpected media type in target: %s", target.MediaType) } - if target.Size != 1582 { + if target.Size != 1632 { t.Fatalf("unexpected size in target: %d", target.Size) } diff --git a/manifest/ocischema/manifest_test.go b/manifest/ocischema/manifest_test.go index c37fe8fb..749cb859 100644 --- a/manifest/ocischema/manifest_test.go +++ b/manifest/ocischema/manifest_test.go @@ -10,7 +10,6 
@@ import ( "github.com/opencontainers/image-spec/specs-go/v1" ) -// TODO (mikebrow): add annotations to the test var expectedManifestSerialization = []byte(`{ "schemaVersion": 2, "mediaType": "application/vnd.oci.image.manifest.v1+json", @@ -31,7 +30,10 @@ var expectedManifestSerialization = []byte(`{ "lettuce": "wrap" } } - ] + ], + "annotations": { + "hot": "potato" + } }`) func TestManifest(t *testing.T) { @@ -51,6 +53,7 @@ func TestManifest(t *testing.T) { Annotations: map[string]string{"lettuce": "wrap"}, }, }, + Annotations: map[string]string{"hot": "potato"}, } deserialized, err := FromStruct(manifest) @@ -87,6 +90,9 @@ func TestManifest(t *testing.T) { if !reflect.DeepEqual(&unmarshalled, deserialized) { t.Fatalf("manifests are different after unmarshaling: %v != %v", unmarshalled, *deserialized) } + if deserialized.Annotations["hot"] != "potato" { + t.Fatalf("unexpected annotation in manifest: %s", deserialized.Annotations["hot"]) + } target := deserialized.Target() if target.Digest != "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b" { From 7b47fb13cfdf637218cb642121539f4e17c5afd7 Mon Sep 17 00:00:00 2001 From: Mike Brown Date: Tue, 1 Aug 2017 17:27:52 -0500 Subject: [PATCH 111/276] update url policy support; testing for annotations in index Signed-off-by: Mike Brown --- manifest/manifestlist/manifestlist.go | 3 -- manifest/manifestlist/manifestlist_test.go | 41 ++++++++++++++++++--- registry/storage/ocimanifesthandler.go | 17 +++++++++ registry/storage/ocimanifesthandler_test.go | 21 +++++++---- 4 files changed, 66 insertions(+), 16 deletions(-) diff --git a/manifest/manifestlist/manifestlist.go b/manifest/manifestlist/manifestlist.go index 3303c6be..0d5c9af1 100644 --- a/manifest/manifestlist/manifestlist.go +++ b/manifest/manifestlist/manifestlist.go @@ -89,9 +89,6 @@ type ManifestList struct { // Config references the image configuration as a blob. Manifests []ManifestDescriptor `json:"manifests"` - - // Annotations contains arbitrary metadata for the image index. - Annotations map[string]string `json:"annotations,omitempty"` } // References returns the distribution descriptors for the referenced image diff --git a/manifest/manifestlist/manifestlist_test.go b/manifest/manifestlist/manifestlist_test.go index 6909895b..720b911b 100644 --- a/manifest/manifestlist/manifestlist_test.go +++ b/manifest/manifestlist/manifestlist_test.go @@ -111,7 +111,12 @@ func TestManifestList(t *testing.T) { } } -// TODO (mikebrow): add annotations on the index and individual manifests +// TODO (mikebrow): add annotations on the manifest list (index) and support for +// empty platform structs (move to Platform *Platform `json:"platform,omitempty"` +// from current Platform PlatformSpec `json:"platform"`) in the manifest descriptor. +// Requires changes to docker/distribution/manifest/manifestlist.ManifestList and .ManifestDescriptor +// and associated serialization APIs in manifestlist.go. Or split the OCI index and +// docker manifest list implementations, which would require a lot of refactoring.
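The TODO above proposes moving the descriptor's platform field from an embedded value (`Platform PlatformSpec`) to a pointer with omitempty, so that empty platforms drop out of the serialized index. For illustration, the difference the pointer makes (the wrapper types here are ours):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type Platform struct {
	Architecture string `json:"architecture"`
	OS           string `json:"os"`
}

// Value field: a zero Platform still serializes, as in the index above.
type withValue struct {
	Platform Platform `json:"platform"`
}

// Pointer field plus omitempty: a nil Platform vanishes from the output,
// which is what the TODO proposes for OCI index descriptors.
type withPointer struct {
	Platform *Platform `json:"platform,omitempty"`
}

func main() {
	v, _ := json.Marshal(withValue{})
	p, _ := json.Marshal(withPointer{})
	fmt.Println(string(v)) // {"platform":{"architecture":"","os":""}}
	fmt.Println(string(p)) // {}
}
```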
var expectedOCIImageIndexSerialization = []byte(`{ "schemaVersion": 2, "mediaType": "application/vnd.oci.image.index.v1+json", @@ -128,10 +133,25 @@ var expectedOCIImageIndexSerialization = []byte(`{ ] } }, + { + "mediaType": "application/vnd.oci.image.manifest.v1+json", + "size": 985, + "digest": "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", + "annotations": { + "platform": "none" + }, + "platform": { + "architecture": "", + "os": "" + } + }, { "mediaType": "application/vnd.oci.image.manifest.v1+json", "size": 2392, "digest": "sha256:6346340964309634683409684360934680934608934608934608934068934608", + "annotations": { + "what": "for" + }, "platform": { "architecture": "sun4m", "os": "sunos" @@ -156,9 +176,18 @@ func TestOCIImageIndex(t *testing.T) { }, { Descriptor: distribution.Descriptor{ - Digest: "sha256:6346340964309634683409684360934680934608934608934608934068934608", - Size: 2392, - MediaType: "application/vnd.oci.image.manifest.v1+json", + Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", + Size: 985, + MediaType: "application/vnd.oci.image.manifest.v1+json", + Annotations: map[string]string{"platform": "none"}, + }, + }, + { + Descriptor: distribution.Descriptor{ + Digest: "sha256:6346340964309634683409684360934680934608934608934608934068934608", + Size: 2392, + MediaType: "application/vnd.oci.image.manifest.v1+json", + Annotations: map[string]string{"what": "for"}, }, Platform: PlatformSpec{ Architecture: "sun4m", @@ -190,7 +219,7 @@ func TestOCIImageIndex(t *testing.T) { // Check that the canonical field has the expected value. if !bytes.Equal(expectedOCIImageIndexSerialization, canonical) { - t.Fatalf("manifest bytes not equal: %q != %q", string(canonical), string(expectedOCIImageIndexSerialization)) + t.Fatalf("manifest bytes not equal to expected: %q != %q", string(canonical), string(expectedOCIImageIndexSerialization)) } var unmarshalled DeserializedManifestList @@ -203,7 +232,7 @@ func TestOCIImageIndex(t *testing.T) { } references := deserialized.References() - if len(references) != 2 { + if len(references) != 3 { t.Fatalf("unexpected number of references: %d", len(references)) } for i := range references { diff --git a/registry/storage/ocimanifesthandler.go b/registry/storage/ocimanifesthandler.go index c61beb4b..f1fbcead 100644 --- a/registry/storage/ocimanifesthandler.go +++ b/registry/storage/ocimanifesthandler.go @@ -3,6 +3,7 @@ package storage import ( "encoding/json" "fmt" + "net/url" "github.com/docker/distribution" "github.com/docker/distribution/context" @@ -79,6 +80,22 @@ func (ms *ocischemaManifestHandler) verifyManifest(ctx context.Context, mnfst oc var err error switch descriptor.MediaType { + case v1.MediaTypeImageLayer, v1.MediaTypeImageLayerGzip, v1.MediaTypeImageLayerNonDistributable, v1.MediaTypeImageLayerNonDistributableGzip: + allow := ms.manifestURLs.allow + deny := ms.manifestURLs.deny + for _, u := range descriptor.URLs { + var pu *url.URL + pu, err = url.Parse(u) + if err != nil || (pu.Scheme != "http" && pu.Scheme != "https") || pu.Fragment != "" || (allow != nil && !allow.MatchString(u)) || (deny != nil && deny.MatchString(u)) { + err = errInvalidURL + break + } + } + if err == nil && len(descriptor.URLs) == 0 { + // If no URLs, require that the blob exists + _, err = blobsService.Stat(ctx, descriptor.Digest) + } + case v1.MediaTypeImageManifest: var exists bool exists, err = manifestService.Exists(ctx, descriptor.Digest) diff --git a/registry/storage/ocimanifesthandler_test.go 
b/registry/storage/ocimanifesthandler_test.go index 047f1c3a..302294af 100644 --- a/registry/storage/ocimanifesthandler_test.go +++ b/registry/storage/ocimanifesthandler_test.go @@ -52,6 +52,11 @@ func TestVerifyOCIManifestNonDistributableLayer(t *testing.T) { } cases := []testcase{ + { + nonDistributableLayer, + nil, + distribution.ErrManifestBlobUnknown{Digest: nonDistributableLayer.Digest}, + }, { layer, []string{"http://foo/bar"}, @@ -60,37 +65,37 @@ func TestVerifyOCIManifestNonDistributableLayer(t *testing.T) { { nonDistributableLayer, []string{"file:///local/file"}, - nil, + errInvalidURL, }, { nonDistributableLayer, []string{"http://foo/bar#baz"}, - nil, + errInvalidURL, }, { nonDistributableLayer, []string{""}, - nil, + errInvalidURL, }, { nonDistributableLayer, []string{"https://foo/bar", ""}, - nil, + errInvalidURL, }, { nonDistributableLayer, []string{"", "https://foo/bar"}, - nil, + errInvalidURL, }, { nonDistributableLayer, []string{"http://nope/bar"}, - nil, + errInvalidURL, }, { nonDistributableLayer, []string{"http://foo/nope"}, - nil, + errInvalidURL, }, { nonDistributableLayer, @@ -122,6 +127,8 @@ func TestVerifyOCIManifestNonDistributableLayer(t *testing.T) { if _, ok = verr[1].(distribution.ErrManifestBlobUnknown); ok { err = verr[0] } + } else if len(verr) == 1 { + err = verr[0] } } if err != c.Err { From ad7ab0853cca5599ddf7f06bbdd76b823d23cf63 Mon Sep 17 00:00:00 2001 From: Mike Brown Date: Thu, 31 Aug 2017 21:14:40 -0500 Subject: [PATCH 112/276] follow commit 9c88801a12a97de49abf26e9604975eececa654c Signed-off-by: Mike Brown --- manifest/ocischema/builder.go | 3 ++- manifest/ocischema/builder_test.go | 2 +- registry/handlers/manifests.go | 10 +++++----- registry/storage/ocimanifesthandler.go | 9 +++++---- registry/storage/ocimanifesthandler_test.go | 2 +- 5 files changed, 14 insertions(+), 12 deletions(-) diff --git a/manifest/ocischema/builder.go b/manifest/ocischema/builder.go index c10376f8..9bfef614 100644 --- a/manifest/ocischema/builder.go +++ b/manifest/ocischema/builder.go @@ -1,8 +1,9 @@ package ocischema import ( + "context" + "github.com/docker/distribution" - "github.com/docker/distribution/context" "github.com/opencontainers/go-digest" "github.com/opencontainers/image-spec/specs-go/v1" ) diff --git a/manifest/ocischema/builder_test.go b/manifest/ocischema/builder_test.go index 557681ba..8e751279 100644 --- a/manifest/ocischema/builder_test.go +++ b/manifest/ocischema/builder_test.go @@ -1,11 +1,11 @@ package ocischema import ( + "context" "reflect" "testing" "github.com/docker/distribution" - "github.com/docker/distribution/context" "github.com/opencontainers/go-digest" "github.com/opencontainers/image-spec/specs-go/v1" ) diff --git a/registry/handlers/manifests.go b/registry/handlers/manifests.go index 53c7c807..8da3f7af 100644 --- a/registry/handlers/manifests.go +++ b/registry/handlers/manifests.go @@ -181,7 +181,7 @@ func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request) // matching the digest.
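For reference, the URL policy that patch 111 wires into verifyManifest, and that the flipped test expectations above encode, accepts a layer URL only if it parses, uses http or https, carries no fragment, matches the allow pattern (when configured), and misses the deny pattern (when configured). A standalone sketch of that predicate (the helper name and sample URLs are ours):

```go
package main

import (
	"fmt"
	"net/url"
	"regexp"
)

// validURL mirrors the checks in verifyManifest: scheme, fragment,
// then the optional allow and deny regular expressions.
func validURL(u string, allow, deny *regexp.Regexp) bool {
	pu, err := url.Parse(u)
	if err != nil || (pu.Scheme != "http" && pu.Scheme != "https") || pu.Fragment != "" {
		return false
	}
	if allow != nil && !allow.MatchString(u) {
		return false
	}
	if deny != nil && deny.MatchString(u) {
		return false
	}
	return true
}

func main() {
	allow := regexp.MustCompile(`^https?://foo`)
	for _, u := range []string{
		"https://foo/bar",    // true
		"file:///local/file", // false: bad scheme
		"http://foo/bar#baz", // false: fragment
		"http://nope/bar",    // false: fails the allow pattern
	} {
		fmt.Println(u, validURL(u, allow, nil))
	}
}
```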
if imh.Tag != "" && manifestType == manifestSchema2 && !supports[manifestSchema2] { // Rewrite manifest in schema1 format - ctxu.GetLogger(imh).Infof("rewriting manifest %s in schema1 format to support old client", imh.Digest.String()) + dcontext.GetLogger(imh).Infof("rewriting manifest %s in schema1 format to support old client", imh.Digest.String()) manifest, err = imh.convertSchema2Manifest(schema2Manifest) if err != nil { @@ -189,7 +189,7 @@ func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request) } } else if imh.Tag != "" && manifestType == manifestlistSchema && !supports[manifestlistSchema] { // Rewrite manifest in schema1 format - ctxu.GetLogger(imh).Infof("rewriting manifest list %s in schema1 format to support old client", imh.Digest.String()) + dcontext.GetLogger(imh).Infof("rewriting manifest list %s in schema1 format to support old client", imh.Digest.String()) // Find the image manifest corresponding to the default // platform @@ -327,9 +327,9 @@ func (imh *manifestHandler) PutManifest(w http.ResponseWriter, r *http.Request) isAnOCIManifest := mediaType == v1.MediaTypeImageManifest || mediaType == v1.MediaTypeImageIndex if isAnOCIManifest { - ctxu.GetLogger(imh).Debug("Putting an OCI Manifest!") + dcontext.GetLogger(imh).Debug("Putting an OCI Manifest!") } else { - ctxu.GetLogger(imh).Debug("Putting a Docker Manifest!") + dcontext.GetLogger(imh).Debug("Putting a Docker Manifest!") } var options []distribution.ManifestServiceOption @@ -410,7 +410,7 @@ func (imh *manifestHandler) PutManifest(w http.ResponseWriter, r *http.Request) w.Header().Set("Docker-Content-Digest", imh.Digest.String()) w.WriteHeader(http.StatusCreated) - ctxu.GetLogger(imh).Debug("Succeeded in putting manifest!") + dcontext.GetLogger(imh).Debug("Succeeded in putting manifest!") } // applyResourcePolicy checks whether the resource class matches what has diff --git a/registry/storage/ocimanifesthandler.go b/registry/storage/ocimanifesthandler.go index f1fbcead..57ff2829 100644 --- a/registry/storage/ocimanifesthandler.go +++ b/registry/storage/ocimanifesthandler.go @@ -1,12 +1,13 @@ package storage import ( + "context" "encoding/json" "fmt" "net/url" "github.com/docker/distribution" - "github.com/docker/distribution/context" + dcontext "github.com/docker/distribution/context" "github.com/docker/distribution/manifest/ocischema" "github.com/opencontainers/go-digest" "github.com/opencontainers/image-spec/specs-go/v1" @@ -23,7 +24,7 @@ type ocischemaManifestHandler struct { var _ ManifestHandler = &ocischemaManifestHandler{} func (ms *ocischemaManifestHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { - context.GetLogger(ms.ctx).Debug("(*ocischemaManifestHandler).Unmarshal") + dcontext.GetLogger(ms.ctx).Debug("(*ocischemaManifestHandler).Unmarshal") var m ocischema.DeserializedManifest if err := json.Unmarshal(content, &m); err != nil { @@ -34,7 +35,7 @@ func (ms *ocischemaManifestHandler) Unmarshal(ctx context.Context, dgst digest.D } func (ms *ocischemaManifestHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { - context.GetLogger(ms.ctx).Debug("(*ocischemaManifestHandler).Put") + dcontext.GetLogger(ms.ctx).Debug("(*ocischemaManifestHandler).Put") m, ok := manifest.(*ocischema.DeserializedManifest) if !ok { @@ -52,7 +53,7 @@ func (ms *ocischemaManifestHandler) Put(ctx context.Context, manifest distributi revision, err := ms.blobStore.Put(ctx, mt, payload) if 
err != nil { - context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) + dcontext.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) return "", err } diff --git a/registry/storage/ocimanifesthandler_test.go b/registry/storage/ocimanifesthandler_test.go index 302294af..3983a9f7 100644 --- a/registry/storage/ocimanifesthandler_test.go +++ b/registry/storage/ocimanifesthandler_test.go @@ -1,11 +1,11 @@ package storage import ( + "context" "regexp" "testing" "github.com/docker/distribution" - "github.com/docker/distribution/context" "github.com/docker/distribution/manifest" "github.com/docker/distribution/manifest/ocischema" "github.com/docker/distribution/registry/storage/driver/inmemory" From f6224f78ba1c0b5e5a71678fd9d493465ddd5c9d Mon Sep 17 00:00:00 2001 From: Mike Brown Date: Mon, 18 Jun 2018 15:18:59 -0500 Subject: [PATCH 113/276] register oci image index Signed-off-by: Mike Brown --- manifest/manifestlist/manifestlist.go | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/manifest/manifestlist/manifestlist.go b/manifest/manifestlist/manifestlist.go index 0d5c9af1..7be29714 100644 --- a/manifest/manifestlist/manifestlist.go +++ b/manifest/manifestlist/manifestlist.go @@ -45,6 +45,21 @@ func init() { if err != nil { panic(fmt.Sprintf("Unable to register manifest: %s", err)) } + + imageIndexFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { + m := new(DeserializedManifestList) + err := m.UnmarshalJSON(b) + if err != nil { + return nil, distribution.Descriptor{}, err + } + + dgst := digest.FromBytes(b) + return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: v1.MediaTypeImageIndex}, err + } + err = distribution.RegisterManifestSchema(v1.MediaTypeImageIndex, imageIndexFunc) + if err != nil { + panic(fmt.Sprintf("Unable to register OCI Image Index: %s", err)) + } } // PlatformSpec specifies a platform where a particular image manifest is From 1d47ef7b801369dc2ca257732ea91d39089ecebc Mon Sep 17 00:00:00 2001 From: "Owen W. Taylor" Date: Wed, 14 Mar 2018 16:55:45 -0400 Subject: [PATCH 114/276] Check media types when unmarshalling manifests When unmarshalling manifests from JSON, check that the MediaType field corresponds to the type that we are unmarshalling as. This makes sure that when we retrieve a manifest from the manifest store, it has the same type that it was handled as before being stored. Signed-off-by: Owen W.
Taylor --- manifest/manifestlist/manifestlist.go | 36 ++++++++--- manifest/manifestlist/manifestlist_test.go | 70 ++++++++++++++++++++-- manifest/ocischema/manifest.go | 5 ++ manifest/ocischema/manifest_test.go | 57 +++++++++++++++++- manifest/schema2/manifest.go | 6 ++ manifest/schema2/manifest_test.go | 57 +++++++++++++++++- 6 files changed, 214 insertions(+), 17 deletions(-) diff --git a/manifest/manifestlist/manifestlist.go b/manifest/manifestlist/manifestlist.go index 7be29714..11df4c25 100644 --- a/manifest/manifestlist/manifestlist.go +++ b/manifest/manifestlist/manifestlist.go @@ -38,6 +38,13 @@ func init() { return nil, distribution.Descriptor{}, err } + if m.MediaType != MediaTypeManifestList { + err = fmt.Errorf("mediaType in manifest list should be '%s' not '%s'", + MediaTypeManifestList, m.MediaType) + + return nil, distribution.Descriptor{}, err + } + dgst := digest.FromBytes(b) return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifestList}, err } @@ -53,6 +60,13 @@ func init() { return nil, distribution.Descriptor{}, err } + if m.MediaType != v1.MediaTypeImageIndex { + err = fmt.Errorf("mediaType in image index should be '%s' not '%s'", + v1.MediaTypeImageIndex, m.MediaType) + + return nil, distribution.Descriptor{}, err + } + dgst := digest.FromBytes(b) return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: v1.MediaTypeImageIndex}, err } @@ -130,15 +144,23 @@ type DeserializedManifestList struct { // DeserializedManifestList which contains the resulting manifest list // and its JSON representation. func FromDescriptors(descriptors []ManifestDescriptor) (*DeserializedManifestList, error) { - var m ManifestList + var mediaType string if len(descriptors) > 0 && descriptors[0].Descriptor.MediaType == v1.MediaTypeImageManifest { - m = ManifestList{ - Versioned: OCISchemaVersion, - } + mediaType = v1.MediaTypeImageIndex } else { - m = ManifestList{ - Versioned: SchemaVersion, - } + mediaType = MediaTypeManifestList + } + + return FromDescriptorsWithMediaType(descriptors, mediaType) +} + +// For testing purposes, it's useful to be able to specify the media type explicitly +func FromDescriptorsWithMediaType(descriptors []ManifestDescriptor, mediaType string) (*DeserializedManifestList, error) { + m := ManifestList{ + Versioned: manifest.Versioned{ + SchemaVersion: 2, + MediaType: mediaType, + }, } m.Manifests = make([]ManifestDescriptor, len(descriptors), len(descriptors)) diff --git a/manifest/manifestlist/manifestlist_test.go b/manifest/manifestlist/manifestlist_test.go index 720b911b..00b43900 100644 --- a/manifest/manifestlist/manifestlist_test.go +++ b/manifest/manifestlist/manifestlist_test.go @@ -38,7 +38,7 @@ var expectedManifestListSerialization = []byte(`{ ] }`) -func TestManifestList(t *testing.T) { +func makeTestManifestList(t *testing.T, mediaType string) ([]ManifestDescriptor, *DeserializedManifestList) { manifestDescriptors := []ManifestDescriptor{ { Descriptor: distribution.Descriptor{ @@ -65,11 +65,16 @@ func TestManifestList(t *testing.T) { }, } - deserialized, err := FromDescriptors(manifestDescriptors) + deserialized, err := FromDescriptorsWithMediaType(manifestDescriptors, mediaType) if err != nil { t.Fatalf("error creating DeserializedManifestList: %v", err) } + return manifestDescriptors, deserialized +} + +func TestManifestList(t *testing.T) { + manifestDescriptors, deserialized := makeTestManifestList(t, MediaTypeManifestList) mediaType, canonical, _ := deserialized.Payload() if mediaType != 
MediaTypeManifestList { @@ -160,7 +165,7 @@ var expectedOCIImageIndexSerialization = []byte(`{ ] }`) -func TestOCIImageIndex(t *testing.T) { +func makeTestOCIImageIndex(t *testing.T, mediaType string) ([]ManifestDescriptor, *DeserializedManifestList) { manifestDescriptors := []ManifestDescriptor{ { Descriptor: distribution.Descriptor{ @@ -196,11 +201,17 @@ func TestOCIImageIndex(t *testing.T) { }, } - deserialized, err := FromDescriptors(manifestDescriptors) + deserialized, err := FromDescriptorsWithMediaType(manifestDescriptors, mediaType) if err != nil { t.Fatalf("error creating DeserializedManifestList: %v", err) } + return manifestDescriptors, deserialized +} + +func TestOCIImageIndex(t *testing.T) { + manifestDescriptors, deserialized := makeTestOCIImageIndex(t, v1.MediaTypeImageIndex) + mediaType, canonical, _ := deserialized.Payload() if mediaType != v1.MediaTypeImageIndex { @@ -241,3 +252,54 @@ func TestOCIImageIndex(t *testing.T) { } } } + +func mediaTypeTest(t *testing.T, contentType string, mediaType string, shouldError bool) { + var m *DeserializedManifestList + if contentType == MediaTypeManifestList { + _, m = makeTestManifestList(t, mediaType) + } else { + _, m = makeTestOCIImageIndex(t, mediaType) + } + + _, canonical, err := m.Payload() + if err != nil { + t.Fatalf("error getting payload, %v", err) + } + + unmarshalled, descriptor, err := distribution.UnmarshalManifest( + contentType, + canonical) + + if shouldError { + if err == nil { + t.Fatalf("bad content type should have produced error") + } + } else { + if err != nil { + t.Fatalf("error unmarshaling manifest, %v", err) + } + + asManifest := unmarshalled.(*DeserializedManifestList) + if asManifest.MediaType != mediaType { + t.Fatalf("Bad media type '%v' as unmarshalled", asManifest.MediaType) + } + + if descriptor.MediaType != contentType { + t.Fatalf("Bad media type '%v' for descriptor", descriptor.MediaType) + } + + unmarshalledMediaType, _, _ := unmarshalled.Payload() + if unmarshalledMediaType != contentType { + t.Fatalf("Bad media type '%v' for payload", unmarshalledMediaType) + } + } +} + +func TestMediaTypes(t *testing.T) { + mediaTypeTest(t, MediaTypeManifestList, "", true) + mediaTypeTest(t, MediaTypeManifestList, MediaTypeManifestList, false) + mediaTypeTest(t, MediaTypeManifestList, MediaTypeManifestList+"XXX", true) + mediaTypeTest(t, v1.MediaTypeImageIndex, "", true) + mediaTypeTest(t, v1.MediaTypeImageIndex, v1.MediaTypeImageIndex, false) + mediaTypeTest(t, v1.MediaTypeImageIndex, v1.MediaTypeImageIndex+"XXX", true) +} diff --git a/manifest/ocischema/manifest.go b/manifest/ocischema/manifest.go index afab12bd..a6850fde 100644 --- a/manifest/ocischema/manifest.go +++ b/manifest/ocischema/manifest.go @@ -97,6 +97,11 @@ func (m *DeserializedManifest) UnmarshalJSON(b []byte) error { return err } + if manifest.MediaType != v1.MediaTypeImageManifest { + return fmt.Errorf("mediaType in manifest should be '%s' not '%s'", + v1.MediaTypeImageManifest, manifest.MediaType) + } + m.Manifest = manifest return nil diff --git a/manifest/ocischema/manifest_test.go b/manifest/ocischema/manifest_test.go index 749cb859..de53806d 100644 --- a/manifest/ocischema/manifest_test.go +++ b/manifest/ocischema/manifest_test.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/docker/distribution" + "github.com/docker/distribution/manifest" "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -36,9 +37,12 @@ var expectedManifestSerialization = []byte(`{ } }`) -func TestManifest(t *testing.T) { - manifest := Manifest{ - Versioned: 
SchemaVersion, +func makeTestManifest(mediaType string) Manifest { + return Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 2, + MediaType: mediaType, + }, Config: distribution.Descriptor{ Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", Size: 985, @@ -55,6 +59,10 @@ func TestManifest(t *testing.T) { }, Annotations: map[string]string{"hot": "potato"}, } +} + +func TestManifest(t *testing.T) { + manifest := makeTestManifest(v1.MediaTypeImageManifest) deserialized, err := FromStruct(manifest) if err != nil { @@ -131,3 +139,46 @@ func TestManifest(t *testing.T) { t.Fatalf("unexpected annotation in reference: %s", references[1].Annotations["lettuce"]) } } + +func mediaTypeTest(t *testing.T, mediaType string, shouldError bool) { + manifest := makeTestManifest(mediaType) + + deserialized, err := FromStruct(manifest) + if err != nil { + t.Fatalf("error creating DeserializedManifest: %v", err) + } + + unmarshalled, descriptor, err := distribution.UnmarshalManifest( + v1.MediaTypeImageManifest, + deserialized.canonical) + + if shouldError { + if err == nil { + t.Fatalf("bad content type should have produced error") + } + } else { + if err != nil { + t.Fatalf("error unmarshaling manifest, %v", err) + } + + asManifest := unmarshalled.(*DeserializedManifest) + if asManifest.MediaType != mediaType { + t.Fatalf("Bad media type '%v' as unmarshalled", asManifest.MediaType) + } + + if descriptor.MediaType != v1.MediaTypeImageManifest { + t.Fatalf("Bad media type '%v' for descriptor", descriptor.MediaType) + } + + unmarshalledMediaType, _, _ := unmarshalled.Payload() + if unmarshalledMediaType != v1.MediaTypeImageManifest { + t.Fatalf("Bad media type '%v' for payload", unmarshalledMediaType) + } + } +} + +func TestMediaTypes(t *testing.T) { + mediaTypeTest(t, "", true) + mediaTypeTest(t, v1.MediaTypeImageManifest, false) + mediaTypeTest(t, v1.MediaTypeImageManifest+"XXX", true) +} diff --git a/manifest/schema2/manifest.go b/manifest/schema2/manifest.go index 26a6a334..8adbbd0f 100644 --- a/manifest/schema2/manifest.go +++ b/manifest/schema2/manifest.go @@ -116,6 +116,12 @@ func (m *DeserializedManifest) UnmarshalJSON(b []byte) error { return err } + if manifest.MediaType != MediaTypeManifest { + return fmt.Errorf("mediaType in manifest should be '%s' not '%s'", + MediaTypeManifest, manifest.MediaType) + + } + m.Manifest = manifest return nil diff --git a/manifest/schema2/manifest_test.go b/manifest/schema2/manifest_test.go index 86226606..912dda9a 100644 --- a/manifest/schema2/manifest_test.go +++ b/manifest/schema2/manifest_test.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/docker/distribution" + "github.com/docker/distribution/manifest" ) var expectedManifestSerialization = []byte(`{ @@ -26,9 +27,12 @@ var expectedManifestSerialization = []byte(`{ ] }`) -func TestManifest(t *testing.T) { - manifest := Manifest{ - Versioned: SchemaVersion, +func makeTestManifest(mediaType string) Manifest { + return Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 2, + MediaType: mediaType, + }, Config: distribution.Descriptor{ Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", Size: 985, @@ -42,6 +46,10 @@ func TestManifest(t *testing.T) { }, }, } +} + +func TestManifest(t *testing.T) { + manifest := makeTestManifest(MediaTypeManifest) deserialized, err := FromStruct(manifest) if err != nil { @@ -109,3 +117,46 @@ func TestManifest(t *testing.T) { t.Fatalf("unexpected size in reference: %d", references[0].Size) } } + +func 
mediaTypeTest(t *testing.T, mediaType string, shouldError bool) { + manifest := makeTestManifest(mediaType) + + deserialized, err := FromStruct(manifest) + if err != nil { + t.Fatalf("error creating DeserializedManifest: %v", err) + } + + unmarshalled, descriptor, err := distribution.UnmarshalManifest( + MediaTypeManifest, + deserialized.canonical) + + if shouldError { + if err == nil { + t.Fatalf("bad content type should have produced error") + } + } else { + if err != nil { + t.Fatalf("error unmarshaling manifest, %v", err) + } + + asManifest := unmarshalled.(*DeserializedManifest) + if asManifest.MediaType != mediaType { + t.Fatalf("Bad media type '%v' as unmarshalled", asManifest.MediaType) + } + + if descriptor.MediaType != MediaTypeManifest { + t.Fatalf("Bad media type '%v' for descriptor", descriptor.MediaType) + } + + unmarshalledMediaType, _, _ := unmarshalled.Payload() + if unmarshalledMediaType != MediaTypeManifest { + t.Fatalf("Bad media type '%v' for payload", unmarshalledMediaType) + } + } +} + +func TestMediaTypes(t *testing.T) { + mediaTypeTest(t, "", true) + mediaTypeTest(t, MediaTypeManifest, false) + mediaTypeTest(t, MediaTypeManifest+"XXX", true) +} From 60d9c5dfad8c0d44fd5ee0482f6de7b09a15fb02 Mon Sep 17 00:00:00 2001 From: "Owen W. Taylor" Date: Wed, 14 Mar 2018 17:22:36 -0400 Subject: [PATCH 115/276] Handle OCI manifests and image indexes without a media type In the OCI image specification, the MediaType field is reserved and otherwise undefined; assume that manifests without a media type in storage are OCI images or image indexes, and determine which by looking at what fields are in the JSON. We do keep a check that when unmarshalling an OCI image or image index, if it has a MediaType field, it must match the media type of the upload. Signed-off-by: Owen W. Taylor --- manifest/manifestlist/manifestlist.go | 13 ++++++++++--- manifest/manifestlist/manifestlist_test.go | 2 +- manifest/ocischema/manifest.go | 6 +++--- manifest/ocischema/manifest_test.go | 2 +- registry/storage/manifeststore.go | 12 ++++++++++++ 5 files changed, 27 insertions(+), 8 deletions(-) diff --git a/manifest/manifestlist/manifestlist.go b/manifest/manifestlist/manifestlist.go index 11df4c25..a08f1e5a 100644 --- a/manifest/manifestlist/manifestlist.go +++ b/manifest/manifestlist/manifestlist.go @@ -60,8 +60,8 @@ func init() { return nil, distribution.Descriptor{}, err } - if m.MediaType != v1.MediaTypeImageIndex { - err = fmt.Errorf("mediaType in image index should be '%s' not '%s'", + if m.MediaType != "" && m.MediaType != v1.MediaTypeImageIndex { + err = fmt.Errorf("if present, mediaType in image index should be '%s' not '%s'", v1.MediaTypeImageIndex, m.MediaType) return nil, distribution.Descriptor{}, err @@ -205,5 +205,12 @@ func (m *DeserializedManifestList) MarshalJSON() ([]byte, error) { // Payload returns the raw content of the manifest list. The contents can be // used to calculate the content identifier. 
func (m DeserializedManifestList) Payload() (string, []byte, error) { - return m.MediaType, m.canonical, nil + var mediaType string + if m.MediaType == "" { + mediaType = v1.MediaTypeImageIndex + } else { + mediaType = m.MediaType + } + + return mediaType, m.canonical, nil } diff --git a/manifest/manifestlist/manifestlist_test.go b/manifest/manifestlist/manifestlist_test.go index 00b43900..e72292f0 100644 --- a/manifest/manifestlist/manifestlist_test.go +++ b/manifest/manifestlist/manifestlist_test.go @@ -299,7 +299,7 @@ func TestMediaTypes(t *testing.T) { mediaTypeTest(t, MediaTypeManifestList, "", true) mediaTypeTest(t, MediaTypeManifestList, MediaTypeManifestList, false) mediaTypeTest(t, MediaTypeManifestList, MediaTypeManifestList+"XXX", true) - mediaTypeTest(t, v1.MediaTypeImageIndex, "", true) + mediaTypeTest(t, v1.MediaTypeImageIndex, "", false) mediaTypeTest(t, v1.MediaTypeImageIndex, v1.MediaTypeImageIndex, false) mediaTypeTest(t, v1.MediaTypeImageIndex, v1.MediaTypeImageIndex+"XXX", true) } diff --git a/manifest/ocischema/manifest.go b/manifest/ocischema/manifest.go index a6850fde..6de17f9a 100644 --- a/manifest/ocischema/manifest.go +++ b/manifest/ocischema/manifest.go @@ -97,8 +97,8 @@ func (m *DeserializedManifest) UnmarshalJSON(b []byte) error { return err } - if manifest.MediaType != v1.MediaTypeImageManifest { - return fmt.Errorf("mediaType in manifest should be '%s' not '%s'", + if manifest.MediaType != "" && manifest.MediaType != v1.MediaTypeImageManifest { + return fmt.Errorf("if present, mediaType in manifest should be '%s' not '%s'", v1.MediaTypeImageManifest, manifest.MediaType) } @@ -120,5 +120,5 @@ func (m *DeserializedManifest) MarshalJSON() ([]byte, error) { // Payload returns the raw content of the manifest. The contents can be used to // calculate the content identifier. func (m DeserializedManifest) Payload() (string, []byte, error) { - return m.MediaType, m.canonical, nil + return v1.MediaTypeImageManifest, m.canonical, nil } diff --git a/manifest/ocischema/manifest_test.go b/manifest/ocischema/manifest_test.go index de53806d..c39d1c9f 100644 --- a/manifest/ocischema/manifest_test.go +++ b/manifest/ocischema/manifest_test.go @@ -178,7 +178,7 @@ func mediaTypeTest(t *testing.T, mediaType string, shouldError bool) { } func TestMediaTypes(t *testing.T) { - mediaTypeTest(t, "", true) + mediaTypeTest(t, "", false) mediaTypeTest(t, v1.MediaTypeImageManifest, false) mediaTypeTest(t, v1.MediaTypeImageManifest+"XXX", true) } diff --git a/registry/storage/manifeststore.go b/registry/storage/manifeststore.go index 6543f6fd..73bff573 100644 --- a/registry/storage/manifeststore.go +++ b/registry/storage/manifeststore.go @@ -106,6 +106,18 @@ func (ms *manifestStore) Get(ctx context.Context, dgst digest.Digest, options .. 
return ms.ocischemaHandler.Unmarshal(ctx, dgst, content) case manifestlist.MediaTypeManifestList, v1.MediaTypeImageIndex: return ms.manifestListHandler.Unmarshal(ctx, dgst, content) + case "": + // OCI image or image index - no media type in the content + + // First see if it looks like an image index + res, err := ms.manifestListHandler.Unmarshal(ctx, dgst, content) + resIndex := res.(*manifestlist.DeserializedManifestList) + if err == nil && resIndex.Manifests != nil { + return resIndex, nil + } + + // Otherwise, assume it must be an image manifest + return ms.ocischemaHandler.Unmarshal(ctx, dgst, content) default: return nil, distribution.ErrManifestVerification{fmt.Errorf("unrecognized manifest content type %s", versioned.MediaType)} } From 132abc6de59b8f68f047be70b85edf5932e8f09f Mon Sep 17 00:00:00 2001 From: "Owen W. Taylor" Date: Thu, 15 Mar 2018 16:04:58 -0400 Subject: [PATCH 116/276] Test storing OCI image manifests and indexes with/without a media type OCI image manifests and indexes are supported both with and without an embedded MediaType (the field is reserved according to the spec). Test storing and retrieving both types from the manifest store. Signed-off-by: Owen W. Taylor --- manifest/ocischema/builder.go | 30 ++++- registry/storage/manifeststore_test.go | 153 ++++++++++++++++++++++++- 2 files changed, 176 insertions(+), 7 deletions(-) diff --git a/manifest/ocischema/builder.go b/manifest/ocischema/builder.go index 9bfef614..22a53c0d 100644 --- a/manifest/ocischema/builder.go +++ b/manifest/ocischema/builder.go @@ -4,12 +4,13 @@ import ( "context" "github.com/docker/distribution" + "github.com/docker/distribution/manifest" "github.com/opencontainers/go-digest" "github.com/opencontainers/image-spec/specs-go/v1" ) // builder is a type for constructing manifests. -type builder struct { +type Builder struct { // bs is a BlobService used to publish the configuration blob. bs distribution.BlobService @@ -22,26 +23,43 @@ type builder struct { // Annotations contains arbitrary metadata relating to the targeted content. annotations map[string]string + + // For testing purposes + mediaType string } // NewManifestBuilder is used to build new manifests for the current schema // version. It takes a BlobService so it can publish the configuration blob // as part of the Build process, and annotations. func NewManifestBuilder(bs distribution.BlobService, configJSON []byte, annotations map[string]string) distribution.ManifestBuilder { - mb := &builder{ + mb := &Builder{ bs: bs, configJSON: make([]byte, len(configJSON)), annotations: annotations, + mediaType: v1.MediaTypeImageManifest, } copy(mb.configJSON, configJSON) return mb } +// For testing purposes, we want to be able to create an OCI image with +// either an MediaType either empty, or with the OCI image value +func (mb *Builder) SetMediaType(mediaType string) { + if mediaType != "" && mediaType != v1.MediaTypeImageManifest { + panic("Invalid media type for OCI image manifest") + } + + mb.mediaType = mediaType +} + // Build produces a final manifest from the given references. 
-func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) { +func (mb *Builder) Build(ctx context.Context) (distribution.Manifest, error) { m := Manifest{ - Versioned: SchemaVersion, + Versioned: manifest.Versioned{ + SchemaVersion: 2, + MediaType: mb.mediaType, + }, Layers: make([]distribution.Descriptor, len(mb.layers)), Annotations: mb.annotations, } @@ -76,12 +94,12 @@ func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) { } // AppendReference adds a reference to the current ManifestBuilder. -func (mb *builder) AppendReference(d distribution.Describable) error { +func (mb *Builder) AppendReference(d distribution.Describable) error { mb.layers = append(mb.layers, d.Descriptor()) return nil } // References returns the current references added to this builder. -func (mb *builder) References() []distribution.Descriptor { +func (mb *Builder) References() []distribution.Descriptor { return mb.layers } diff --git a/registry/storage/manifeststore_test.go b/registry/storage/manifeststore_test.go index 2fad7611..61a4beee 100644 --- a/registry/storage/manifeststore_test.go +++ b/registry/storage/manifeststore_test.go @@ -9,6 +9,8 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/ocischema" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/cache/memory" @@ -17,6 +19,7 @@ import ( "github.com/docker/distribution/testutil" "github.com/docker/libtrust" "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/specs-go/v1" ) type manifestStoreTestEnv struct { @@ -356,6 +359,155 @@ func testManifestStorage(t *testing.T, options ...RegistryOption) { } } +func TestOCIManifestStorage(t *testing.T) { + testOCIManifestStorage(t, "includeMediaTypes=true", true) + testOCIManifestStorage(t, "includeMediaTypes=false", false) +} + +func testOCIManifestStorage(t *testing.T, testname string, includeMediaTypes bool) { + var imageMediaType string + var indexMediaType string + if includeMediaTypes { + imageMediaType = v1.MediaTypeImageManifest + indexMediaType = v1.MediaTypeImageIndex + } else { + imageMediaType = "" + indexMediaType = "" + } + + repoName, _ := reference.WithName("foo/bar") + env := newManifestStoreTestEnv(t, repoName, "thetag", + BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), + EnableDelete, EnableRedirect) + + ctx := context.Background() + ms, err := env.repository.Manifests(ctx) + if err != nil { + t.Fatal(err) + } + + // Build a manifest and store it and its layers in the registry + + blobStore := env.repository.Blobs(ctx) + builder := ocischema.NewManifestBuilder(blobStore, []byte{}, map[string]string{}) + builder.(*ocischema.Builder).SetMediaType(imageMediaType) + + // Add some layers + for i := 0; i < 2; i++ { + rs, ds, err := testutil.CreateRandomTarFile() + if err != nil { + t.Fatalf("%s: unexpected error generating test layer file", testname) + } + dgst := digest.Digest(ds) + + wr, err := env.repository.Blobs(env.ctx).Create(env.ctx) + if err != nil { + t.Fatalf("%s: unexpected error creating test upload: %v", testname, err) + } + + if _, err := io.Copy(wr, rs); err != nil { + t.Fatalf("%s: unexpected error copying to upload: %v", testname, err) + } + + if _, err := wr.Commit(env.ctx, distribution.Descriptor{Digest: dgst}); err != nil { + t.Fatalf("%s: unexpected error 
finishing upload: %v", testname, err) + } + + builder.AppendReference(distribution.Descriptor{Digest: dgst}) + } + + manifest, err := builder.Build(ctx) + if err != nil { + t.Fatalf("%s: unexpected error generating manifest: %v", testname, err) + } + + var manifestDigest digest.Digest + if manifestDigest, err = ms.Put(ctx, manifest); err != nil { + t.Fatalf("%s: unexpected error putting manifest: %v", testname, err) + } + + // Also create an image index that contains the manifest + + descriptor, err := env.registry.BlobStatter().Stat(ctx, manifestDigest) + if err != nil { + t.Fatalf("%s: unexpected error getting manifest descriptor", testname) + } + descriptor.MediaType = v1.MediaTypeImageManifest + + platformSpec := manifestlist.PlatformSpec{ + Architecture: "atari2600", + OS: "CP/M", + } + + manifestDescriptors := []manifestlist.ManifestDescriptor{ + manifestlist.ManifestDescriptor{ + Descriptor: descriptor, + Platform: platformSpec, + }, + } + + imageIndex, err := manifestlist.FromDescriptorsWithMediaType(manifestDescriptors, indexMediaType) + if err != nil { + t.Fatalf("%s: unexpected error creating image index: %v", testname, err) + } + + var indexDigest digest.Digest + if indexDigest, err = ms.Put(ctx, imageIndex); err != nil { + t.Fatalf("%s: unexpected error putting image index: %v", testname, err) + } + + // Now check that we can retrieve the manifest + + fromStore, err := ms.Get(ctx, manifestDigest) + if err != nil { + t.Fatalf("%s: unexpected error fetching manifest: %v", testname, err) + } + + fetchedManifest, ok := fromStore.(*ocischema.DeserializedManifest) + if !ok { + t.Fatalf("%s: unexpected type for fetched manifest", testname) + } + + if fetchedManifest.MediaType != imageMediaType { + t.Fatalf("%s: unexpected MediaType for result, %s", testname, fetchedManifest.MediaType) + } + + payloadMediaType, _, err := fromStore.Payload() + if err != nil { + t.Fatalf("%s: error getting payload %v", testname, err) + } + + if payloadMediaType != v1.MediaTypeImageManifest { + t.Fatalf("%s: unexpected MediaType for manifest payload, %s", testname, payloadMediaType) + } + + // and the image index + + fromStore, err = ms.Get(ctx, indexDigest) + if err != nil { + t.Fatalf("%s: unexpected error fetching image index: %v", testname, err) + } + + fetchedIndex, ok := fromStore.(*manifestlist.DeserializedManifestList) + if !ok { + t.Fatalf("%s: unexpected type for fetched manifest", testname) + } + + if fetchedIndex.MediaType != indexMediaType { + t.Fatalf("%s: unexpected MediaType for result, %s", testname, fetchedManifest.MediaType) + } + + payloadMediaType, _, err = fromStore.Payload() + if err != nil { + t.Fatalf("%s: error getting payload %v", testname, err) + } + + if payloadMediaType != v1.MediaTypeImageIndex { + t.Fatalf("%s: unexpected MediaType for index payload, %s", testname, payloadMediaType) + } + +} + // TestLinkPathFuncs ensures that the link path functions behavior are locked // down and implemented as expected. 
func TestLinkPathFuncs(t *testing.T) { @@ -387,5 +539,4 @@ func TestLinkPathFuncs(t *testing.T) { t.Fatalf("incorrect path returned: %q != %q", p, testcase.expected) } } - } From 7655a3d91f370135cdde6337d881f91cee119816 Mon Sep 17 00:00:00 2001 From: Huu Nguyen Date: Fri, 3 Mar 2017 11:53:55 -0800 Subject: [PATCH 117/276] Add option to skip certificate verification for the s3 driver Signed-off-by: Huu Nguyen --- registry/storage/driver/s3-aws/s3.go | 40 +++++++++++++++++++++++++--- 1 file changed, 36 insertions(+), 4 deletions(-) diff --git a/registry/storage/driver/s3-aws/s3.go b/registry/storage/driver/s3-aws/s3.go index 0c6f7e5e..3166902b 100644 --- a/registry/storage/driver/s3-aws/s3.go +++ b/registry/storage/driver/s3-aws/s3.go @@ -14,6 +14,7 @@ package s3 import ( "bytes" "context" + "crypto/tls" "fmt" "io" "io/ioutil" @@ -90,6 +91,7 @@ type DriverParameters struct { Encrypt bool KeyID string Secure bool + SkipVerify bool V4Auth bool ChunkSize int64 MultipartCopyChunkSize int64 @@ -248,6 +250,23 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { return nil, fmt.Errorf("The secure parameter should be a boolean") } + skipVerifyBool := false + skipVerify := parameters["skipverify"] + switch skipVerify := skipVerify.(type) { + case string: + b, err := strconv.ParseBool(skipVerify) + if err != nil { + return nil, fmt.Errorf("The skipVerify parameter should be a boolean") + } + skipVerifyBool = b + case bool: + skipVerifyBool = skipVerify + case nil: + // do nothing + default: + return nil, fmt.Errorf("The skipVerify parameter should be a boolean") + } + v4Bool := true v4auth := parameters["v4auth"] switch v4auth := v4auth.(type) { @@ -344,6 +363,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { encryptBool, fmt.Sprint(keyID), secureBool, + skipVerifyBool, v4Bool, chunkSize, multipartCopyChunkSize, @@ -420,10 +440,22 @@ func New(params DriverParameters) (*Driver, error) { awsConfig.WithRegion(params.Region) awsConfig.WithDisableSSL(!params.Secure) - if params.UserAgent != "" { - awsConfig.WithHTTPClient(&http.Client{ - Transport: transport.NewTransport(http.DefaultTransport, transport.NewHeaderRequestModifier(http.Header{http.CanonicalHeaderKey("User-Agent"): []string{params.UserAgent}})), - }) + if params.UserAgent != "" || params.SkipVerify { + httpTransport := http.DefaultTransport + if params.SkipVerify { + httpTransport = &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + } + } + if params.UserAgent != "" { + awsConfig.WithHTTPClient(&http.Client{ + Transport: transport.NewTransport(httpTransport, transport.NewHeaderRequestModifier(http.Header{http.CanonicalHeaderKey("User-Agent"): []string{params.UserAgent}})), + }) + } else { + awsConfig.WithHTTPClient(&http.Client{ + Transport: transport.NewTransport(httpTransport), + }) + } } s3obj := s3.New(session.New(awsConfig)) From f5c6357c6da1fb947f6c1376352ab9264ff17a49 Mon Sep 17 00:00:00 2001 From: Huu Nguyen Date: Fri, 3 Mar 2017 11:54:16 -0800 Subject: [PATCH 118/276] Update s3_test to account for SkipVerify parameter Signed-off-by: Huu Nguyen --- registry/storage/driver/s3-aws/s3_test.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/registry/storage/driver/s3-aws/s3_test.go b/registry/storage/driver/s3-aws/s3_test.go index 363a22eb..66ef6a30 100644 --- a/registry/storage/driver/s3-aws/s3_test.go +++ b/registry/storage/driver/s3-aws/s3_test.go @@ -31,6 +31,7 @@ func init() { encrypt := os.Getenv("S3_ENCRYPT") keyID := os.Getenv("S3_KEY_ID") 
secure := os.Getenv("S3_SECURE") + skipVerify := os.Getenv("S3_SKIP_VERIFY") v4Auth := os.Getenv("S3_V4_AUTH") region := os.Getenv("AWS_REGION") objectACL := os.Getenv("S3_OBJECT_ACL") @@ -59,6 +60,14 @@ func init() { } } + skipVerifyBool := false + if skipVerify != "" { + skipVerifyBool, err = strconv.ParseBool(skipVerify) + if err != nil { + return nil, err + } + } + v4Bool := true if v4Auth != "" { v4Bool, err = strconv.ParseBool(v4Auth) @@ -76,6 +85,7 @@ func init() { encryptBool, keyID, secureBool, + skipVerifyBool, v4Bool, minChunkSize, defaultMultipartCopyChunkSize, From e8d7941ca6a90ada87caaa9affb67565309aeeba Mon Sep 17 00:00:00 2001 From: Mike Brown Date: Tue, 19 Jun 2018 13:48:42 -0500 Subject: [PATCH 119/276] address lint and gofmt issues Signed-off-by: Mike Brown --- manifest/manifestlist/manifestlist.go | 2 +- manifest/ocischema/builder.go | 6 +++--- registry/storage/manifeststore_test.go | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/manifest/manifestlist/manifestlist.go b/manifest/manifestlist/manifestlist.go index a08f1e5a..f4e915ee 100644 --- a/manifest/manifestlist/manifestlist.go +++ b/manifest/manifestlist/manifestlist.go @@ -154,7 +154,7 @@ func FromDescriptors(descriptors []ManifestDescriptor) (*DeserializedManifestLis return FromDescriptorsWithMediaType(descriptors, mediaType) } -// For testing purposes, it's useful to be able to specify the media type explicitly +// FromDescriptorsWithMediaType is for testing purposes, it's useful to be able to specify the media type explicitly func FromDescriptorsWithMediaType(descriptors []ManifestDescriptor, mediaType string) (*DeserializedManifestList, error) { m := ManifestList{ Versioned: manifest.Versioned{ diff --git a/manifest/ocischema/builder.go b/manifest/ocischema/builder.go index 22a53c0d..c189a16e 100644 --- a/manifest/ocischema/builder.go +++ b/manifest/ocischema/builder.go @@ -9,7 +9,7 @@ import ( "github.com/opencontainers/image-spec/specs-go/v1" ) -// builder is a type for constructing manifests. +// Builder is a type for constructing manifests. type Builder struct { // bs is a BlobService used to publish the configuration blob. 
bs distribution.BlobService @@ -43,8 +43,8 @@ func NewManifestBuilder(bs distribution.BlobService, configJSON []byte, annotati return mb } -// For testing purposes, we want to be able to create an OCI image with -// either an MediaType either empty, or with the OCI image value +// SetMediaType assigns the passed mediatype or error if the mediatype is not a +// valid media type for oci image manifests currently: "" or "application/vnd.oci.image.manifest.v1+json" func (mb *Builder) SetMediaType(mediaType string) { if mediaType != "" && mediaType != v1.MediaTypeImageManifest { panic("Invalid media type for OCI image manifest") diff --git a/registry/storage/manifeststore_test.go b/registry/storage/manifeststore_test.go index 61a4beee..6e583c53 100644 --- a/registry/storage/manifeststore_test.go +++ b/registry/storage/manifeststore_test.go @@ -440,7 +440,7 @@ func testOCIManifestStorage(t *testing.T, testname string, includeMediaTypes boo } manifestDescriptors := []manifestlist.ManifestDescriptor{ - manifestlist.ManifestDescriptor{ + { Descriptor: descriptor, Platform: platformSpec, }, From 276fdce3d9a28e0d56700c8e8333b687cc948769 Mon Sep 17 00:00:00 2001 From: Andrew Leung Date: Wed, 27 Jun 2018 08:47:40 -0700 Subject: [PATCH 120/276] Add configurable layers in manifest events Signed-off-by: Andrew Leung --- cmd/registry/config-cache.yml | 2 ++ cmd/registry/config-dev.yml | 2 ++ configuration/configuration.go | 7 +++++++ docs/configuration.md | 11 +++++++++++ manifest/schema2/manifest.go | 2 +- notifications/bridge.go | 29 +++++++++++++++++------------ notifications/bridge_test.go | 26 ++++++++++++++++++++++---- notifications/event.go | 3 +++ registry/handlers/app.go | 2 +- 9 files changed, 66 insertions(+), 18 deletions(-) diff --git a/cmd/registry/config-cache.yml b/cmd/registry/config-cache.yml index 7a274ea5..f7e47b82 100644 --- a/cmd/registry/config-cache.yml +++ b/cmd/registry/config-cache.yml @@ -29,6 +29,8 @@ redis: readtimeout: 10ms writetimeout: 10ms notifications: + events: + manifestlayers: true endpoints: - name: local-8082 url: http://localhost:5003/callback diff --git a/cmd/registry/config-dev.yml b/cmd/registry/config-dev.yml index e6b09064..25002569 100644 --- a/cmd/registry/config-dev.yml +++ b/cmd/registry/config-dev.yml @@ -47,6 +47,8 @@ redis: readtimeout: 10ms writetimeout: 10ms notifications: + events: + manifestlayers: true endpoints: - name: local-5003 url: http://localhost:5003/callback diff --git a/configuration/configuration.go b/configuration/configuration.go index 621d29ae..59841382 100644 --- a/configuration/configuration.go +++ b/configuration/configuration.go @@ -544,6 +544,8 @@ func (auth Auth) MarshalYAML() (interface{}, error) { // Notifications configures multiple http endpoints. type Notifications struct { + // EventConfig is the configuration for the event format that is sent to each Endpoint. + EventConfig Events `yaml:"events,omitempty"` // Endpoints is a list of http configurations for endpoints that // respond to webhook notifications. In the future, we may allow other // kinds of endpoints, such as external queues. @@ -564,6 +566,11 @@ type Endpoint struct { Ignore Ignore `yaml:"ignore"` // ignore event types } +// Events configures notification events. 
+type Events struct { + ManifestLayers bool `yaml:"manifestlayers"` // include layer data in manifest events +} + //Ignore configures mediaTypes and actions of the event, that it won't be propagated type Ignore struct { MediaTypes []string `yaml:"mediatypes"` // target media types to ignore diff --git a/docs/configuration.md b/docs/configuration.md index f2e83513..7a6b9b2c 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -226,6 +226,8 @@ http: http2: disabled: false notifications: + events: + manifestlayers: true endpoints: - name: alistener disabled: false @@ -853,6 +855,8 @@ settings for the registry. ```none notifications: + events: + manifestlayers: true endpoints: - name: alistener disabled: false @@ -896,6 +900,13 @@ accept event notifications. | `mediatypes`|no| A list of target media types to ignore. Events with these target media types are not published to the endpoint. | | `actions` |no| A list of actions to ignore. Events with these actions are not published to the endpoint. | +### `events` + +The `events` structure configures the information provided in event notifications. + +| Parameter | Required | Description | +|-----------|----------|-------------------------------------------------------| +| `manifestlayers` | no | If `true`, include layer information in manifest events. | ## `redis` diff --git a/manifest/schema2/manifest.go b/manifest/schema2/manifest.go index a2708c75..243d58e9 100644 --- a/manifest/schema2/manifest.go +++ b/manifest/schema2/manifest.go @@ -71,7 +71,7 @@ type Manifest struct { Layers []distribution.Descriptor `json:"layers"` } -// References returnes the descriptors of this manifests references. +// References returns the descriptors of this manifests references. func (m Manifest) References() []distribution.Descriptor { references := make([]distribution.Descriptor, 0, 1+len(m.Layers)) references = append(references, m.Config) diff --git a/notifications/bridge.go b/notifications/bridge.go index 8f6386d3..b336cd89 100644 --- a/notifications/bridge.go +++ b/notifications/bridge.go @@ -12,11 +12,12 @@ import ( ) type bridge struct { - ub URLBuilder - actor ActorRecord - source SourceRecord - request RequestRecord - sink Sink + ub URLBuilder + manfiestLayers bool + actor ActorRecord + source SourceRecord + request RequestRecord + sink Sink } var _ Listener = &bridge{} @@ -31,13 +32,14 @@ type URLBuilder interface { // using the actor and source. Any urls populated in the events created by // this bridge will be created using the URLBuilder. // TODO(stevvooe): Update this to simply take a context.Context object. 
-func NewBridge(ub URLBuilder, source SourceRecord, actor ActorRecord, request RequestRecord, sink Sink) Listener { +func NewBridge(ub URLBuilder, source SourceRecord, actor ActorRecord, request RequestRecord, sink Sink, manifestLayers bool) Listener { return &bridge{ - ub: ub, - actor: actor, - source: source, - request: request, - sink: sink, + ub: ub, + manfiestLayers: manifestLayers, + actor: actor, + source: source, + request: request, + sink: sink, } } @@ -135,7 +137,7 @@ func (b *bridge) createManifestEvent(action string, repo reference.Named, sm dis } // Ensure we have the canonical manifest descriptor here - _, desc, err := distribution.UnmarshalManifest(mt, p) + manifest, desc, err := distribution.UnmarshalManifest(mt, p) if err != nil { return nil, err } @@ -144,6 +146,9 @@ func (b *bridge) createManifestEvent(action string, repo reference.Named, sm dis event.Target.Length = desc.Size event.Target.Size = desc.Size event.Target.Digest = desc.Digest + if b.manfiestLayers { + event.Target.Layers = append(event.Target.Layers, manifest.References()...) + } ref, err := reference.WithDigest(repo, event.Target.Digest) if err != nil { diff --git a/notifications/bridge_test.go b/notifications/bridge_test.go index 86350993..cdf500c3 100644 --- a/notifications/bridge_test.go +++ b/notifications/bridge_test.go @@ -26,9 +26,18 @@ var ( Name: "test", } request = RequestRecord{} - m = schema1.Manifest{ - Name: repo, - Tag: "latest", + layers = []schema1.FSLayer{ + { + BlobSum: "asdf", + }, + { + BlobSum: "qwer", + }, + } + m = schema1.Manifest{ + Name: repo, + Tag: "latest", + FSLayers: layers, } sm *schema1.SignedManifest @@ -120,7 +129,7 @@ func createTestEnv(t *testing.T, fn testSinkFn) Listener { payload = sm.Canonical dgst = digest.FromBytes(payload) - return NewBridge(ub, source, actor, request, fn) + return NewBridge(ub, source, actor, request, fn, true) } func checkDeleted(t *testing.T, action string, events ...Event) { @@ -170,6 +179,15 @@ func checkCommonManifest(t *testing.T, action string, events ...Event) { if event.Target.URL != u { t.Fatalf("incorrect url passed: \n%q != \n%q", event.Target.URL, u) } + + if len(event.Target.Layers) != len(layers) { + t.Fatalf("unexpected number of layers %v != %v", len(event.Target.Layers), len(layers)) + } + for i, layer := range event.Target.Layers { + if layer.Digest != layers[i].BlobSum { + t.Fatalf("unexpected layer: %q != %q", layer.Digest, layers[i].BlobSum) + } + } } func checkCommon(t *testing.T, events ...Event) { diff --git a/notifications/event.go b/notifications/event.go index 9651cd1b..ab74c675 100644 --- a/notifications/event.go +++ b/notifications/event.go @@ -71,6 +71,9 @@ type Event struct { // Tag provides the tag Tag string `json:"tag,omitempty"` + + // Layers provides the layers descriptors. + Layers []distribution.Descriptor `json:"layers,omitempty"` } `json:"target,omitempty"` // Request covers the request that generated the event. 
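[Editor's note: the `layers` array added to `Event.Target` above is serialized with `omitempty`, so webhook endpoints only see it when `manifestlayers` is enabled in the notifications config (it defaults to off, leaving existing consumers unaffected). As a rough illustration of what a consumer of the new field might look like — not part of this patch — the sketch below hand-rolls a trimmed-down mirror of the `Event` fields shown in this diff; the `{"events": [...]}` envelope and the `localhost:5003/callback` endpoint are assumptions borrowed from the sample configs rather than taken from this patch.]

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

// envelope is a minimal, hypothetical mirror of the notification payload;
// only the fields read below are declared.
type envelope struct {
	Events []struct {
		Action string `json:"action"`
		Target struct {
			Repository string `json:"repository"`
			Digest     string `json:"digest"`
			// Layers is only populated when manifestlayers is enabled.
			Layers []struct {
				MediaType string `json:"mediaType"`
				Size      int64  `json:"size"`
				Digest    string `json:"digest"`
			} `json:"layers"`
		} `json:"target"`
	} `json:"events"`
}

func callback(w http.ResponseWriter, r *http.Request) {
	var env envelope
	if err := json.NewDecoder(r.Body).Decode(&env); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	for _, ev := range env.Events {
		for _, layer := range ev.Target.Layers {
			fmt.Printf("%s %s: layer %s (%d bytes)\n",
				ev.Action, ev.Target.Repository, layer.Digest, layer.Size)
		}
	}
	w.WriteHeader(http.StatusOK)
}

func main() {
	http.HandleFunc("/callback", callback)
	log.Fatal(http.ListenAndServe(":5003", nil))
}
```
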
diff --git a/registry/handlers/app.go index a40a4df3..8ccfffe5 100644 --- a/registry/handlers/app.go +++ b/registry/handlers/app.go @@ -869,7 +869,7 @@ func (app *App) eventBridge(ctx *Context, r *http.Request) notifications.Listene } request := notifications.NewRequestRecord(dcontext.GetRequestID(ctx), r) - return notifications.NewBridge(ctx.urlBuilder, app.events.source, actor, request, app.events.sink) + return notifications.NewBridge(ctx.urlBuilder, app.events.source, actor, request, app.events.sink, app.Config.Notifications.EventConfig.ManifestLayers) } // nameRequired returns true if the route requires a name. From 0d8f4ac7b8d62c237ff93ee0b3cce5114f425c79 Mon Sep 17 00:00:00 2001 From: Manish Tomar Date: Wed, 18 Jul 2018 11:25:59 -0700 Subject: [PATCH 121/276] add tag deletion event Whenever a tag is deleted, an event is sent out via the regular notification channels. Signed-off-by: Manish Tomar --- notifications/bridge.go | 8 ++++++++ notifications/bridge_test.go | 23 ++++++++++++++++++----- notifications/listener.go | 30 ++++++++++++++++++++++++++++++ notifications/listener_test.go | 12 ++++++++++-- 4 files changed, 66 insertions(+), 7 deletions(-) diff --git a/notifications/bridge.go b/notifications/bridge.go index 8f6386d3..bc4a90aa 100644 --- a/notifications/bridge.go +++ b/notifications/bridge.go @@ -108,6 +108,14 @@ func (b *bridge) BlobDeleted(repo reference.Named, dgst digest.Digest) error { return b.createBlobDeleteEventAndWrite(EventActionDelete, repo, dgst) } +func (b *bridge) TagDeleted(repo reference.Named, tag string) error { + event := b.createEvent(EventActionDelete) + event.Target.Repository = repo.Name() + event.Target.Tag = tag + + return b.sink.Write(*event) +} + func (b *bridge) createManifestEventAndWrite(action string, repo reference.Named, sm distribution.Manifest) error { manifestEvent, err := b.createManifestEvent(action, repo, sm) if err != nil { diff --git a/notifications/bridge_test.go b/notifications/bridge_test.go index 86350993..0f4d7736 100644 --- a/notifications/bridge_test.go +++ b/notifications/bridge_test.go @@ -97,6 +97,9 @@ func TestEventBridgeManifestPulledWithTag(t *testing.T) { func TestEventBridgeManifestDeleted(t *testing.T) { l := createTestEnv(t, testSinkFn(func(events ...Event) error { checkDeleted(t, EventActionDelete, events...) + if events[0].Target.Digest != dgst { + t.Fatalf("unexpected digest on event target: %q != %q", events[0].Target.Digest, dgst) + } return nil })) @@ -106,6 +109,21 @@ func TestEventBridgeManifestDeleted(t *testing.T) { } } +func TestEventBridgeTagDeleted(t *testing.T) { + l := createTestEnv(t, testSinkFn(func(events ...Event) error { + checkDeleted(t, EventActionDelete, events...) 
+ if events[0].Target.Tag != m.Tag { + t.Fatalf("unexpected tag on event target: %q != %q", events[0].Target.Tag, m.Tag) + } + return nil + })) + + repoRef, _ := reference.WithName(repo) + if err := l.TagDeleted(repoRef, m.Tag); err != nil { + t.Fatalf("unexpected error notifying tag deletion: %v", err) + } +} + func createTestEnv(t *testing.T, fn testSinkFn) Listener { pk, err := libtrust.GenerateECP256PrivateKey() if err != nil { @@ -142,14 +160,9 @@ func checkDeleted(t *testing.T, action string, events ...Event) { t.Fatalf("request not equal: %#v != %#v", event.Actor, actor) } - if event.Target.Digest != dgst { - t.Fatalf("unexpected digest on event target: %q != %q", event.Target.Digest, dgst) - } - if event.Target.Repository != repo { t.Fatalf("unexpected repository: %q != %q", event.Target.Repository, repo) } - } func checkCommonManifest(t *testing.T, action string, events ...Event) { diff --git a/notifications/listener.go b/notifications/listener.go index 52ec0ee7..8cfdb67e 100644 --- a/notifications/listener.go +++ b/notifications/listener.go @@ -5,6 +5,7 @@ import ( "net/http" "github.com/docker/distribution" + dcontext "github.com/docker/distribution/context" "github.com/docker/distribution/reference" "github.com/opencontainers/go-digest" @@ -25,10 +26,16 @@ type BlobListener interface { BlobDeleted(repo reference.Named, desc digest.Digest) error } +// RepoListener describes a listener that can respond to repository related events. +type RepoListener interface { + TagDeleted(repo reference.Named, tag string) error +} + // Listener combines all repository events into a single interface. type Listener interface { ManifestListener BlobListener + RepoListener } type repositoryListener struct { @@ -214,3 +221,26 @@ func (bwl *blobWriterListener) Commit(ctx context.Context, desc distribution.Des return committed, err } + +type tagServiceListener struct { + distribution.TagService + parent *repositoryListener +} + +func (rl *repositoryListener) Tags(ctx context.Context) distribution.TagService { + return &tagServiceListener{ + TagService: rl.Repository.Tags(ctx), + parent: rl, + } +} + +func (tagSL *tagServiceListener) Untag(ctx context.Context, tag string) error { + if err := tagSL.TagService.Untag(ctx, tag); err != nil { + return err + } + if err := tagSL.parent.listener.TagDeleted(tagSL.parent.Repository.Named(), tag); err != nil { + dcontext.GetLogger(ctx).Errorf("error dispatching tag deleted to listener: %v", err) + return err + } + return nil +} diff --git a/notifications/listener_test.go b/notifications/listener_test.go index a5849807..32a7f6d9 100644 --- a/notifications/listener_test.go +++ b/notifications/listener_test.go @@ -50,12 +50,12 @@ func TestListener(t *testing.T) { "layer:push": 2, "layer:pull": 2, "layer:delete": 2, + "tag:delete": 1, } if !reflect.DeepEqual(tl.ops, expectedOps) { t.Fatalf("counts do not match:\n%v\n !=\n%v", tl.ops, expectedOps) } - } type testListener struct { @@ -64,7 +64,6 @@ type testListener struct { func (tl *testListener) ManifestPushed(repo reference.Named, m distribution.Manifest, options ...distribution.ManifestServiceOption) error { tl.ops["manifest:push"]++ - return nil } @@ -98,6 +97,11 @@ func (tl *testListener) BlobDeleted(repo reference.Named, d digest.Digest) error return nil } +func (tl *testListener) TagDeleted(repo reference.Named, tag string) error { + tl.ops["tag:delete"]++ + return nil +} + // checkExerciseRegistry takes the registry through all of its operations, // carrying out generic checks. 
func checkExerciseRepository(t *testing.T, repository distribution.Repository) { @@ -200,6 +204,10 @@ func checkExerciseRepository(t *testing.T, repository distribution.Repository) { if err != nil { t.Fatalf("unexpected error deleting blob: %v", err) } + } + err = repository.Tags(ctx).Untag(ctx, m.Tag) + if err != nil { + t.Fatalf("unexpected error deleting tag: %v", err) } } From 5f588fbf9b2eb3cef7a6c5876319a4ec88c43c87 Mon Sep 17 00:00:00 2001 From: Mike Brown Date: Thu, 19 Jul 2018 16:07:26 -0500 Subject: [PATCH 122/276] address review comment regarding panic use Signed-off-by: Mike Brown --- manifest/ocischema/builder.go | 6 ++++-- registry/storage/manifeststore_test.go | 5 ++++- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/manifest/ocischema/builder.go b/manifest/ocischema/builder.go index c189a16e..023a8a0b 100644 --- a/manifest/ocischema/builder.go +++ b/manifest/ocischema/builder.go @@ -2,6 +2,7 @@ package ocischema import ( "context" + "errors" "github.com/docker/distribution" "github.com/docker/distribution/manifest" @@ -45,12 +46,13 @@ func NewManifestBuilder(bs distribution.BlobService, configJSON []byte, annotati // SetMediaType assigns the passed mediatype or error if the mediatype is not a // valid media type for oci image manifests currently: "" or "application/vnd.oci.image.manifest.v1+json" -func (mb *Builder) SetMediaType(mediaType string) { +func (mb *Builder) SetMediaType(mediaType string) error { if mediaType != "" && mediaType != v1.MediaTypeImageManifest { - panic("Invalid media type for OCI image manifest") + return errors.New("Invalid media type for OCI image manifest") } mb.mediaType = mediaType + return nil } // Build produces a final manifest from the given references. diff --git a/registry/storage/manifeststore_test.go b/registry/storage/manifeststore_test.go index 6e583c53..c6a2ba06 100644 --- a/registry/storage/manifeststore_test.go +++ b/registry/storage/manifeststore_test.go @@ -390,7 +390,10 @@ func testOCIManifestStorage(t *testing.T, testname string, includeMediaTypes boo blobStore := env.repository.Blobs(ctx) builder := ocischema.NewManifestBuilder(blobStore, []byte{}, map[string]string{}) - builder.(*ocischema.Builder).SetMediaType(imageMediaType) + err = builder.(*ocischema.Builder).SetMediaType(imageMediaType) + if err != nil { + t.Fatal(err) + } // Add some layers for i := 0; i < 2; i++ { From 20aecf1d7b8c975e22679bb5e5b6502c49c111b5 Mon Sep 17 00:00:00 2001 From: Mike Brown Date: Thu, 19 Jul 2018 19:41:31 -0500 Subject: [PATCH 123/276] added test for initial oci schema version Signed-off-by: Mike Brown --- registry/storage/manifeststore_test.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/registry/storage/manifeststore_test.go b/registry/storage/manifeststore_test.go index c6a2ba06..7c9b9edf 100644 --- a/registry/storage/manifeststore_test.go +++ b/registry/storage/manifeststore_test.go @@ -475,6 +475,10 @@ func testOCIManifestStorage(t *testing.T, testname string, includeMediaTypes boo t.Fatalf("%s: unexpected MediaType for result, %s", testname, fetchedManifest.MediaType) } + if fetchedManifest.SchemaVersion != ocischema.SchemaVersion.SchemaVersion { + t.Fatalf("%s: unexpected schema version for result, %d", testname, fetchedManifest.SchemaVersion) + } + payloadMediaType, _, err := fromStore.Payload() if err != nil { t.Fatalf("%s: error getting payload %v", testname, err) From 54aef6251e4970010567f7b79779c18e07e5db02 Mon Sep 17 00:00:00 2001 From: Andrew Leung Date: Sun, 29 Jul 2018 07:53:33 -0700 Subject: [PATCH 
124/276] Fix typo in manifestLayers variable name Signed-off-by: Andrew Leung --- notifications/bridge.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/notifications/bridge.go b/notifications/bridge.go index b336cd89..1a58c2bb 100644 --- a/notifications/bridge.go +++ b/notifications/bridge.go @@ -13,7 +13,7 @@ import ( type bridge struct { ub URLBuilder - manfiestLayers bool + manifestLayers bool actor ActorRecord source SourceRecord request RequestRecord @@ -35,7 +35,7 @@ type URLBuilder interface { func NewBridge(ub URLBuilder, source SourceRecord, actor ActorRecord, request RequestRecord, sink Sink, manifestLayers bool) Listener { return &bridge{ ub: ub, - manfiestLayers: manifestLayers, + manifestLayers: manifestLayers, actor: actor, source: source, request: request, @@ -146,7 +146,7 @@ func (b *bridge) createManifestEvent(action string, repo reference.Named, sm dis event.Target.Length = desc.Size event.Target.Size = desc.Size event.Target.Digest = desc.Digest - if b.manfiestLayers { + if b.manifestLayers { event.Target.Layers = append(event.Target.Layers, manifest.References()...) } From dc49f84dcc64940628505390a7be3394e4314482 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Wed, 1 Aug 2018 15:37:21 -0700 Subject: [PATCH 125/276] Update build to use travis Update Makefile targets Signed-off-by: Derek McGowan --- .gometalinter.json | 11 +++ .travis.yml | 52 ++++++++++++ Makefile | 142 +++++++++++++++++---------------- circle.yml | 94 ---------------------- coverpkg.sh | 7 -- script/setup/install-dev-tools | 11 +++ script/validate/dco | 12 +++ script/validate/vendor | 20 +++++ version/version.go | 4 + version/version.sh | 6 +- 10 files changed, 189 insertions(+), 170 deletions(-) create mode 100644 .gometalinter.json create mode 100644 .travis.yml delete mode 100644 circle.yml delete mode 100755 coverpkg.sh create mode 100755 script/setup/install-dev-tools create mode 100755 script/validate/dco create mode 100755 script/validate/vendor diff --git a/.gometalinter.json b/.gometalinter.json new file mode 100644 index 00000000..08fe705d --- /dev/null +++ b/.gometalinter.json @@ -0,0 +1,11 @@ +{ + "Vendor": true, + "Deadline": "2m", + "Sort": ["linter", "severity", "path", "line"], + "EnableGC": true, + "Enable": [ + "gofmt", + "golint", + "vet" + ] +} diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 00000000..3b5713a4 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,52 @@ +dist: trusty +sudo: required +# setup travis so that we can run containers for integration tests +services: + - docker + +language: go + +go: + - "1.9.x" + - "1.10.x" + +go_import_path: github.com/docker/distribution + +addons: + apt: + packages: + - python-minimal + + +env: + - TRAVIS_GOOS=linux DOCKER_BUILDTAGS="include_oss include_gcs" TRAVIS_CGO_ENABLED=1 + +before_install: + - uname -r + - sudo apt-get -q update + +install: + - go get -u github.com/vbatts/git-validation + # TODO: Add enforcement of license + # - go get -u github.com/kunalkushwaha/ltag + - cd $TRAVIS_BUILD_DIR + +script: + - export GOOS=$TRAVIS_GOOS + - export CGO_ENABLED=$TRAVIS_CGO_ENABLED + - DCO_VERBOSITY=-q script/validate/dco + - GOOS=linux script/setup/install-dev-tools + - script/validate/vendor + - go build -i . 
+ - make check + - make build + - make binaries + # Currently takes too long + #- if [ "$GOOS" = "linux" ]; then make test-race ; fi + - if [ "$GOOS" = "linux" ]; then make coverage ; fi + +after_success: + - bash <(curl -s https://codecov.io/bash) -F linux + +before_deploy: + # Run tests with storage driver configurations diff --git a/Makefile b/Makefile index 7c6f9c7a..df89bb0c 100644 --- a/Makefile +++ b/Makefile @@ -1,9 +1,21 @@ -# Set an output prefix, which is the local directory if not specified -PREFIX?=$(shell pwd) - +# Root directory of the project (absolute path). +ROOTDIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST)))) # Used to populate version variable in main package. VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always) +REVISION=$(shell git rev-parse HEAD)$(shell if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi) + + +PKG=github.com/docker/distribution + +# Project packages. +PACKAGES=$(shell go list -tags "${BUILDTAGS}" ./... | grep -v /vendor/) +INTEGRATION_PACKAGE=${PKG} +COVERAGE_PACKAGES=$(filter-out ${PKG}/registry/storage/driver/%,${PACKAGES}) + + +# Project binaries. +COMMANDS=registry digest registry-api-descriptor-template # Allow turning off function inlining and variable registerization ifeq (${DISABLE_OPTIMIZATION},true) @@ -11,89 +23,83 @@ ifeq (${DISABLE_OPTIMIZATION},true) VERSION:="$(VERSION)-noopt" endif -GO_LDFLAGS=-ldflags "-X `go list ./version`.Version=$(VERSION)" +WHALE = "+" -.PHONY: all build binaries clean dep-restore dep-save dep-validate fmt lint test test-full vet +# Go files +# +TESTFLAGS_RACE= +GOFILES=$(shell find . -type f -name '*.go') +GO_TAGS=$(if $(BUILDTAGS),-tags "$(BUILDTAGS)",) +GO_LDFLAGS=-ldflags '-s -w -X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PKG) $(EXTRA_LDFLAGS)' + +BINARIES=$(addprefix bin/,$(COMMANDS)) + +# Flags passed to `go test` +TESTFLAGS ?= -v $(TESTFLAGS_RACE) +TESTFLAGS_PARALLEL ?= 8 + +.PHONY: all build binaries check clean test test-race test-full integration coverage .DEFAULT: all -all: fmt vet lint build test binaries + +all: binaries AUTHORS: .mailmap .git/HEAD git log --format='%aN <%aE>' | sort -fu > $@ # This only needs to be generated by hand when cutting full releases. version/version.go: + @echo "$(WHALE) $@" ./version/version.sh > $@ -# Required for go 1.5 to build -GO15VENDOREXPERIMENT := 1 +check: ## run all linters (TODO: enable "unused", "varcheck", "ineffassign", "unconvert", "staticheck", "goimports", "structcheck") + @echo "$(WHALE) $@" + gometalinter --config .gometalinter.json ./... -# Go files -GOFILES=$(shell find . -type f -name '*.go') +test: ## run tests, except integration test with test.short + @echo "$(WHALE) $@" + @go test ${GO_TAGS} -test.short ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES}) -# Package list -PKGS=$(shell go list -tags "${DOCKER_BUILDTAGS}" ./... 
| grep -v ^github.com/docker/distribution/vendor/) +test-race: ## run tests, except integration test with test.short and race + @echo "$(WHALE) $@" + @go test ${GO_TAGS} -race -test.short ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES}) -# Resolving binary dependencies for specific targets -GOLINT=$(shell which golint || echo '') -VNDR=$(shell which vndr || echo '') +test-full: ## run tests, except integration tests + @echo "$(WHALE) $@" + @go test ${GO_TAGS} ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES}) -${PREFIX}/bin/registry: $(GOFILES) - @echo "+ $@" - @go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry +integration: ## run integration tests + @echo "$(WHALE) $@" + @go test ${TESTFLAGS} -parallel ${TESTFLAGS_PARALLEL} ${INTEGRATION_PACKAGE} -${PREFIX}/bin/digest: $(GOFILES) - @echo "+ $@" - @go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/digest +coverage: ## generate coverprofiles from the unit tests + @echo "$(WHALE) $@" + @rm -f coverage.txt + @go test ${GO_TAGS} -i ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${COVERAGE_PACKAGES}) 2> /dev/null + @( for pkg in $(filter-out ${INTEGRATION_PACKAGE},${COVERAGE_PACKAGES}); do \ + go test ${GO_TAGS} ${TESTFLAGS} \ + -cover \ + -coverprofile=profile.out \ + -covermode=atomic $$pkg || exit; \ + if [ -f profile.out ]; then \ + cat profile.out >> coverage.txt; \ + rm profile.out; \ + fi; \ + done ) -${PREFIX}/bin/registry-api-descriptor-template: $(GOFILES) - @echo "+ $@" - @go build -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry-api-descriptor-template +FORCE: -docs/spec/api.md: docs/spec/api.md.tmpl ${PREFIX}/bin/registry-api-descriptor-template - ./bin/registry-api-descriptor-template $< > $@ +# Build a binary from a cmd. +bin/%: cmd/% FORCE + @echo "$(WHALE) $@${BINARY_SUFFIX}" + @go build ${GO_GCFLAGS} ${GO_BUILD_FLAGS} -o $@${BINARY_SUFFIX} ${GO_LDFLAGS} ${GO_TAGS} ./$< -vet: - @echo "+ $@" - @go vet -tags "${DOCKER_BUILDTAGS}" $(PKGS) - -fmt: - @echo "+ $@" - @test -z "$$(gofmt -s -l . 2>&1 | grep -v ^vendor/ | tee /dev/stderr)" || \ - (echo >&2 "+ please format Go code with 'gofmt -s'" && false) - -lint: - @echo "+ $@" - $(if $(GOLINT), , \ - $(error Please install golint: `go get -u github.com/golang/lint/golint`)) - @test -z "$$($(GOLINT) ./... 2>&1 | grep -v ^vendor/ | tee /dev/stderr)" +binaries: $(BINARIES) ## build binaries + @echo "$(WHALE) $@" build: - @echo "+ $@" - @go build -tags "${DOCKER_BUILDTAGS}" -v ${GO_LDFLAGS} $(PKGS) + @echo "$(WHALE) $@" + @go build ${GO_GCFLAGS} ${GO_BUILD_FLAGS} ${GO_LDFLAGS} ${GO_TAGS} $(PACKAGES) -test: - @echo "+ $@" - @go test -test.short -tags "${DOCKER_BUILDTAGS}" $(PKGS) - -test-full: - @echo "+ $@" - @go test -tags "${DOCKER_BUILDTAGS}" $(PKGS) - -binaries: ${PREFIX}/bin/registry ${PREFIX}/bin/digest ${PREFIX}/bin/registry-api-descriptor-template - @echo "+ $@" - -clean: - @echo "+ $@" - @rm -rf "${PREFIX}/bin/registry" "${PREFIX}/bin/digest" "${PREFIX}/bin/registry-api-descriptor-template" - -dep-validate: - @echo "+ $@" - $(if $(VNDR), , \ - $(error Please install vndr: go get github.com/lk4d4/vndr)) - @rm -Rf .vendor.bak - @mv vendor .vendor.bak - @$(VNDR) - @test -z "$$(diff -r vendor .vendor.bak 2>&1 | tee /dev/stderr)" || \ - (echo >&2 "+ inconsistent dependencies! 
what you have in vendor.conf does not match with what you have in vendor" && false) - @rm -Rf vendor - @mv .vendor.bak vendor +clean: ## clean up binaries + @echo "$(WHALE) $@" + @rm -f $(BINARIES) diff --git a/circle.yml b/circle.yml deleted file mode 100644 index 085ef4f7..00000000 --- a/circle.yml +++ /dev/null @@ -1,94 +0,0 @@ -# Pony-up! -machine: - pre: - # Install gvm - - bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/1.0.22/binscripts/gvm-installer) - # Install codecov for coverage - - pip install --user codecov - - post: - # go - - gvm install go1.10 --prefer-binary --name=stable - - environment: - # Convenient shortcuts to "common" locations - CHECKOUT: /home/ubuntu/$CIRCLE_PROJECT_REPONAME - BASE_DIR: src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME - # Trick circle brainflat "no absolute path" behavior - BASE_STABLE: ../../../$HOME/.gvm/pkgsets/stable/global/$BASE_DIR - DOCKER_BUILDTAGS: "include_oss include_gcs" - # Workaround Circle parsing dumb bugs and/or YAML wonkyness - CIRCLE_PAIN: "mode: set" - - hosts: - # Not used yet - fancy: 127.0.0.1 - -dependencies: - pre: - # Copy the code to the gopath of all go versions - - > - gvm use stable && - mkdir -p "$(dirname $BASE_STABLE)" && - cp -R "$CHECKOUT" "$BASE_STABLE" - - override: - # Install dependencies for every copied clone/go version - - gvm use stable && go get github.com/lk4d4/vndr: - pwd: $BASE_STABLE - - post: - # For the stable go version, additionally install linting tools - - > - gvm use stable && - go get github.com/axw/gocov/gocov github.com/golang/lint/golint - -test: - pre: - # Output the go versions we are going to test - # - gvm use old && go version - - gvm use stable && go version - - # Ensure validation of dependencies - - git fetch origin: - pwd: $BASE_STABLE - - gvm use stable && if test -n "`git diff --stat=1000 origin/master | grep -E \"^[[:space:]]*vendor\"`"; then make dep-validate; fi: - pwd: $BASE_STABLE - - # First thing: build everything. This will catch compile errors, and it's - # also necessary for go vet to work properly (see #807). - - gvm use stable && go install $(go list ./... | grep -v "/vendor/"): - pwd: $BASE_STABLE - - # FMT - - gvm use stable && make fmt: - pwd: $BASE_STABLE - - # VET - - gvm use stable && make vet: - pwd: $BASE_STABLE - - # LINT - - gvm use stable && make lint: - pwd: $BASE_STABLE - - override: - # Test stable, and report - - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/$PACKAGE/coverage.out -coverpkg=$(./coverpkg.sh $PACKAGE $ROOT_PACKAGE) $PACKAGE': - timeout: 1000 - pwd: $BASE_STABLE - - # Test stable with race - - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | grep -v "registry/handlers" | grep -v "registry/storage/driver" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; go test -race -tags "$DOCKER_BUILDTAGS" -test.short $PACKAGE': - timeout: 1000 - pwd: $BASE_STABLE - post: - # Report to codecov - - bash <(curl -s https://codecov.io/bash): - pwd: $BASE_STABLE - - ## Notes - # Do we want these as well? - # - go get code.google.com/p/go.tools/cmd/goimports - # - test -z "$(goimports -l -w ./... 
| tee /dev/stderr)" - # http://labix.org/gocheck diff --git a/coverpkg.sh b/coverpkg.sh deleted file mode 100755 index 25d419ae..00000000 --- a/coverpkg.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/usr/bin/env bash -# Given a subpackage and the containing package, figures out which packages -# need to be passed to `go test -coverpkg`: this includes all of the -# subpackage's dependencies within the containing package, as well as the -# subpackage itself. -DEPENDENCIES="$(go list -f $'{{range $f := .Deps}}{{$f}}\n{{end}}' ${1} | grep ${2} | grep -v github.com/docker/distribution/vendor)" -echo "${1} ${DEPENDENCIES}" | xargs echo -n | tr ' ' ',' diff --git a/script/setup/install-dev-tools b/script/setup/install-dev-tools new file mode 100755 index 00000000..d4e134a1 --- /dev/null +++ b/script/setup/install-dev-tools @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +# +# Install developer tools to $GOBIN (or $GOPATH/bin if unset) +# +set -eu -o pipefail + +go get -u github.com/alecthomas/gometalinter +gometalinter --install >/dev/null +go get -u github.com/LK4D4/vndr +go get -u github.com/cpuguy83/go-md2man diff --git a/script/validate/dco b/script/validate/dco new file mode 100755 index 00000000..fb68620a --- /dev/null +++ b/script/validate/dco @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -eu -o pipefail + +if ! command -v git-validation; then + >&2 echo "ERROR: git-validation not found. Install with:" + >&2 echo " go get -u github.com/vbatts/git-validation" + exit 1 +fi + +verbosity="${DCO_VERBOSITY--v}" +GIT_CHECK_EXCLUDE="./vendor:./script/validate/template" git-validation "$verbosity" -run DCO,short-subject,dangling-whitespace diff --git a/script/validate/vendor b/script/validate/vendor new file mode 100755 index 00000000..7e81fdd7 --- /dev/null +++ b/script/validate/vendor @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +set -eu -o pipefail + +rm -rf vendor/ +vndr |& grep -v -i clone + +DIFF_PATH="vendor/" +DIFF=$(git status --porcelain -- "$DIFF_PATH") + +if [ "$DIFF" ]; then + echo + echo "These files were modified:" + echo + echo "$DIFF" + echo + exit 1 +else + echo "$DIFF_PATH is correct" +fi diff --git a/version/version.go b/version/version.go index 807bb4a2..5954d95d 100644 --- a/version/version.go +++ b/version/version.go @@ -9,3 +9,7 @@ var Package = "github.com/docker/distribution" // build, it will be replaced by the actual version. The value here will be // used if the registry is run after a go get based install. var Version = "v2.6.0+unknown" + +// Revision is filled with the VCS (e.g. git) revision being used to build +// the program at linking time. +var Revision = "" diff --git a/version/version.sh b/version/version.sh index 53e29ce9..75dca784 100755 --- a/version/version.sh +++ b/version/version.sh @@ -19,4 +19,8 @@ var Package = "$(go list)" // build, it will be replaced by the actual version. The value here will be // used if the registry is run after a go get based install. var Version = "$(git describe --match 'v[0-9]*' --dirty='.m' --always)+unknown" -EOF \ No newline at end of file + +// Revision is filled with the VCS (e.g. git) revision being used to build +// the program at linking time. 
+var Revision = "" +EOF From 328069bb4dc4a0db3f024960ab89a5ee68d69240 Mon Sep 17 00:00:00 2001 From: Manish Tomar Date: Thu, 2 Aug 2018 22:58:52 -0700 Subject: [PATCH 126/276] Add support for repo deleted event This is done by adding another interface, RepositoryRemover, that is implemented by the registry instance and injected into the app context for event tracking. Signed-off-by: Manish Tomar --- notifications/bridge.go | 7 +++++++ notifications/bridge_test.go | 12 ++++++++++++ notifications/listener.go | 24 ++++++++++++++++++++---- notifications/listener_test.go | 22 +++++++++++++++++++--- registry.go | 5 +++++ registry/storage/catalog.go | 11 +++++++++++ registry/storage/registry.go | 2 ++ 7 files changed, 76 insertions(+), 7 deletions(-) diff --git a/notifications/bridge.go b/notifications/bridge.go index bc4a90aa..48048063 100644 --- a/notifications/bridge.go +++ b/notifications/bridge.go @@ -116,6 +116,13 @@ func (b *bridge) TagDeleted(repo reference.Named, tag string) error { return b.sink.Write(*event) } +func (b *bridge) RepoDeleted(repo reference.Named) error { + event := b.createEvent(EventActionDelete) + event.Target.Repository = repo.Name() + + return b.sink.Write(*event) +} + func (b *bridge) createManifestEventAndWrite(action string, repo reference.Named, sm distribution.Manifest) error { manifestEvent, err := b.createManifestEvent(action, repo, sm) if err != nil { diff --git a/notifications/bridge_test.go b/notifications/bridge_test.go index 0f4d7736..5c1401aa 100644 --- a/notifications/bridge_test.go +++ b/notifications/bridge_test.go @@ -124,6 +124,18 @@ func TestEventBridgeTagDeleted(t *testing.T) { } } +func TestEventBridgeRepoDeleted(t *testing.T) { + l := createTestEnv(t, testSinkFn(func(events ...Event) error { + checkDeleted(t, EventActionDelete, events...) + return nil + })) + + repoRef, _ := reference.WithName(repo) + if err := l.RepoDeleted(repoRef); err != nil { + t.Fatalf("unexpected error notifying repo deletion: %v", err) + } +} + func createTestEnv(t *testing.T, fn testSinkFn) Listener { pk, err := libtrust.GenerateECP256PrivateKey() if err != nil { diff --git a/notifications/listener.go b/notifications/listener.go index 8cfdb67e..b11f0773 100644 --- a/notifications/listener.go +++ b/notifications/listener.go @@ -26,9 +26,9 @@ type BlobListener interface { BlobDeleted(repo reference.Named, desc digest.Digest) error } -// RepoListener describes a listener that can respond to repository related events. type RepoListener interface { TagDeleted(repo reference.Named, tag string) error + RepoDeleted(repo reference.Named) error } // Listener combines all repository events into a single interface. @@ -43,12 +43,28 @@ type repositoryListener struct { listener Listener } +type removerListener struct { + distribution.RepositoryRemover + listener Listener +} + // Listen dispatches events on the repository to the listener.
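// A rough sketch of the intended use after this change (illustrative only; the
// actual wiring happens in the registry handler code in a follow-up commit):
//   repo, remover = Listen(repo, registry.(distribution.RepositoryRemover), listener)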
-func Listen(repo distribution.Repository, listener Listener) distribution.Repository { +func Listen(repo distribution.Repository, remover distribution.RepositoryRemover, listener Listener) (distribution.Repository, distribution.RepositoryRemover) { return &repositoryListener{ - Repository: repo, - listener: listener, + Repository: repo, + listener: listener, + }, &removerListener{ + RepositoryRemover: remover, + listener: listener, + } +} + +func (nl *removerListener) Remove(ctx context.Context, name reference.Named) error { + err := nl.RepositoryRemover.Remove(ctx, name) + if err != nil { + return err } + return nl.listener.RepoDeleted(name) } func (rl *repositoryListener) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { diff --git a/notifications/listener_test.go b/notifications/listener_test.go index 32a7f6d9..1d8b731e 100644 --- a/notifications/listener_test.go +++ b/notifications/listener_test.go @@ -38,10 +38,15 @@ func TestListener(t *testing.T) { if err != nil { t.Fatalf("unexpected error getting repo: %v", err) } - repository = Listen(repository, tl) + + remover, ok := registry.(distribution.RepositoryRemover) + if !ok { + t.Fatal("registry does not implement RepositoryRemover") + } + repository, remover = Listen(repository, remover, tl) // Now take the registry through a number of operations - checkExerciseRepository(t, repository) + checkExerciseRepository(t, repository, remover) expectedOps := map[string]int{ "manifest:push": 1, @@ -51,6 +56,7 @@ "layer:pull": 2, "layer:delete": 2, "tag:delete": 1, + "repo:delete": 1, } if !reflect.DeepEqual(tl.ops, expectedOps) { @@ -102,9 +108,14 @@ func (tl *testListener) TagDeleted(repo reference.Named, tag string) error { return nil } +func (tl *testListener) RepoDeleted(repo reference.Named) error { + tl.ops["repo:delete"]++ + return nil +} + // checkExerciseRepository takes the registry through all of its operations, // carrying out generic checks. -func checkExerciseRepository(t *testing.T, repository distribution.Repository) { +func checkExerciseRepository(t *testing.T, repository distribution.Repository, remover distribution.RepositoryRemover) { // TODO(stevvooe): This would be a nice testutil function. Basically, it // takes the registry through a common set of operations.
This could be // used to make cross-cutting updates by changing internals that affect @@ -210,4 +221,9 @@ func checkExerciseRepository(t *testing.T, repository distribution.Repository) { if err != nil { t.Fatalf("unexpected error deleting tag: %v", err) } + + err = remover.Remove(ctx, repository.Named()) + if err != nil { + t.Fatalf("unexpected error deleting repo: %v", err) + } } diff --git a/registry.go b/registry.go index a3a80ab8..6c321098 100644 --- a/registry.go +++ b/registry.go @@ -54,6 +54,11 @@ type RepositoryEnumerator interface { Enumerate(ctx context.Context, ingester func(string) error) error } +// RepositoryRemover removes the given repository +type RepositoryRemover interface { + Remove(ctx context.Context, name reference.Named) error +} + // ManifestServiceOption is a function argument for Manifest Service methods type ManifestServiceOption interface { Apply(ManifestService) error diff --git a/registry/storage/catalog.go b/registry/storage/catalog.go index 3c1a78de..4db8bd88 100644 --- a/registry/storage/catalog.go +++ b/registry/storage/catalog.go @@ -7,6 +7,7 @@ import ( "path" "strings" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/driver" ) @@ -70,6 +71,16 @@ func (reg *registry) Enumerate(ctx context.Context, ingester func(string) error) return err } +// Remove removes a repository from storage +func (r *registry) Remove(ctx context.Context, name reference.Named) error { + root, err := pathFor(repositoriesRootPathSpec{}) + if err != nil { + return err + } + repoDir := path.Join(root, name.Name()) + return r.driver.Delete(ctx, repoDir) +} + // lessPath returns true if one path a is less than path b. // // A component-wise comparison is done, rather than the lexical comparison of diff --git a/registry/storage/registry.go b/registry/storage/registry.go index 46b96853..70d5b8d2 100644 --- a/registry/storage/registry.go +++ b/registry/storage/registry.go @@ -23,6 +23,7 @@ type registry struct { schema1SigningKey libtrust.PrivateKey blobDescriptorServiceFactory distribution.BlobDescriptorServiceFactory manifestURLs manifestURLs + driver storagedriver.StorageDriver } // manifestURLs holds regular expressions for controlling manifest URL whitelisting @@ -133,6 +134,7 @@ func NewRegistry(ctx context.Context, driver storagedriver.StorageDriver, option }, statter: statter, resumableDigestEnabled: true, + driver: driver, } for _, option := range options { From 8f6758278d90fc2f1fba7ca42a21f5dfe34fe52d Mon Sep 17 00:00:00 2001 From: Manish Tomar Date: Thu, 2 Aug 2018 23:13:37 -0700 Subject: [PATCH 127/276] Take handler update Forgot to commit this earlier. Signed-off-by: Manish Tomar --- registry/handlers/app.go | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/registry/handlers/app.go b/registry/handlers/app.go index a40a4df3..76c63080 100644 --- a/registry/handlers/app.go +++ b/registry/handlers/app.go @@ -58,10 +58,11 @@ type App struct { Config *configuration.Configuration - router *mux.Router // main application router, configured with dispatchers - driver storagedriver.StorageDriver // driver maintains the app global storage driver instance.
+ registry distribution.Namespace // registry is the primary registry backend for the app instance. + repoRemover distribution.RepositoryRemover // repoRemover provides the ability to delete repos + accessController auth.AccessController // main access controller for application // httpHost is a parsed representation of the http.host parameter from // the configuration. Only the Scheme and Host fields are used. @@ -320,6 +321,11 @@ func NewApp(ctx context.Context, config *configuration.Configuration) *App { app.isCache = true dcontext.GetLogger(app).Info("Registry configured as a proxy cache to ", config.Proxy.RemoteURL) } + var ok bool + app.repoRemover, ok = app.registry.(distribution.RepositoryRemover) + if !ok { + dcontext.GetLogger(app).Warnf("Registry does not implement RepositoryRemover. Will not be able to delete repos and tags") + } return app } @@ -696,8 +702,9 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { } // assign and decorate the authorized repository with an event bridge. - context.Repository = notifications.Listen( + context.Repository, context.App.repoRemover = notifications.Listen( repository, + context.App.repoRemover, app.eventBridge(context, r)) context.Repository, err = applyRepoMiddleware(app, context.Repository, app.Config.Middleware["repository"]) From e4d5a0a17c37e54a1cf9ca2ae2be10ce6df89354 Mon Sep 17 00:00:00 2001 From: Manish Tomar Date: Fri, 3 Aug 2018 14:08:00 -0700 Subject: [PATCH 128/276] Add documentation Signed-off-by: Manish Tomar --- notifications/listener.go | 1 + 1 file changed, 1 insertion(+) diff --git a/notifications/listener.go b/notifications/listener.go index b11f0773..98ad8da9 100644 --- a/notifications/listener.go +++ b/notifications/listener.go @@ -26,6 +26,7 @@ type BlobListener interface { BlobDeleted(repo reference.Named, desc digest.Digest) error } +// RepoListener provides repository methods that respond to repository lifecycle events type RepoListener interface { TagDeleted(repo reference.Named, tag string) error RepoDeleted(repo reference.Named) error From 8c05756141eb3b22d41860c58735bbf21024fd7e Mon Sep 17 00:00:00 2001 From: Manish Tomar Date: Mon, 6 Aug 2018 09:46:42 -0700 Subject: [PATCH 129/276] lint fix Signed-off-by: Manish Tomar --- registry/storage/catalog.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/registry/storage/catalog.go b/registry/storage/catalog.go index 4db8bd88..ebf80e05 100644 --- a/registry/storage/catalog.go +++ b/registry/storage/catalog.go @@ -72,13 +72,13 @@ func (reg *registry) Enumerate(ctx context.Context, ingester func(string) error) } // Remove removes a repository from storage -func (r *registry) Remove(ctx context.Context, name reference.Named) error { +func (reg *registry) Remove(ctx context.Context, name reference.Named) error { root, err := pathFor(repositoriesRootPathSpec{}) if err != nil { return err } repoDir := path.Join(root, name.Name()) - return r.driver.Delete(ctx, repoDir) + return reg.driver.Delete(ctx, repoDir) } // lessPath returns true if one path a is less than path b.
From 32e2260be280c961c3db31e07db20766547ed546 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Mon, 6 Aug 2018 13:55:26 -0700 Subject: [PATCH 130/276] Enable struct check Signed-off-by: Derek McGowan --- .gometalinter.json | 2 ++ registry/api/v2/routes.go | 9 --------- registry/client/auth/session.go | 2 -- registry/client/repository.go | 12 +++++------- 4 files changed, 7 insertions(+), 18 deletions(-) diff --git a/.gometalinter.json b/.gometalinter.json index 08fe705d..0ceaa139 100644 --- a/.gometalinter.json +++ b/.gometalinter.json @@ -4,6 +4,8 @@ "Sort": ["linter", "severity", "path", "line"], "EnableGC": true, "Enable": [ + "structcheck", + "gofmt", "golint", "vet" diff --git a/registry/api/v2/routes.go b/registry/api/v2/routes.go index 5b80d5be..9612ac2e 100644 --- a/registry/api/v2/routes.go +++ b/registry/api/v2/routes.go @@ -14,15 +14,6 @@ const ( RouteNameCatalog = "catalog" ) -var allEndpoints = []string{ - RouteNameManifest, - RouteNameCatalog, - RouteNameTags, - RouteNameBlob, - RouteNameBlobUpload, - RouteNameBlobUploadChunk, -} - // Router builds a gorilla router with named routes for the various API // methods. This can be used directly by both server implementations and // clients. diff --git a/registry/client/auth/session.go b/registry/client/auth/session.go index db86c9b0..aad8a0e6 100644 --- a/registry/client/auth/session.go +++ b/registry/client/auth/session.go @@ -68,7 +68,6 @@ func NewAuthorizer(manager challenge.Manager, handlers ...AuthenticationHandler) type endpointAuthorizer struct { challenges challenge.Manager handlers []AuthenticationHandler - transport http.RoundTripper } func (ea *endpointAuthorizer) ModifyRequest(req *http.Request) error { @@ -121,7 +120,6 @@ type clock interface { } type tokenHandler struct { - header http.Header creds CredentialStore transport http.RoundTripper clock clock diff --git a/registry/client/repository.go b/registry/client/repository.go index d8e2c795..aa442e65 100644 --- a/registry/client/repository.go +++ b/registry/client/repository.go @@ -81,9 +81,8 @@ func NewRegistry(baseURL string, transport http.RoundTripper) (Registry, error) } type registry struct { - client *http.Client - ub *v2.URLBuilder - context context.Context + client *http.Client + ub *v2.URLBuilder } // Repositories returns a lexicographically sorted catalog given a base URL.
The 'entries' slice will be filled up to the size @@ -152,10 +151,9 @@ func NewRepository(name reference.Named, baseURL string, transport http.RoundTri } type repository struct { - client *http.Client - ub *v2.URLBuilder - context context.Context - name reference.Named + client *http.Client + ub *v2.URLBuilder + name reference.Named } func (r *repository) Named() reference.Named { From 795e11d5fb2c7b8345220f252793c25a1b38d6a9 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Mon, 6 Aug 2018 12:12:07 -0700 Subject: [PATCH 131/276] Enable goimports check Validates that goimports has been run on all files Signed-off-by: Derek McGowan --- .gometalinter.json | 1 + notifications/endpoint.go | 3 ++- registry/storage/driver/base/base.go | 2 +- registry/storage/driver/middleware/cloudfront/s3filter_test.go | 3 ++- 4 files changed, 6 insertions(+), 3 deletions(-) diff --git a/.gometalinter.json b/.gometalinter.json index 08fe705d..862c6307 100644 --- a/.gometalinter.json +++ b/.gometalinter.json @@ -5,6 +5,7 @@ "EnableGC": true, "Enable": [ "gofmt", + "goimports", "golint", "vet" ] diff --git a/notifications/endpoint.go b/notifications/endpoint.go index 285995ec..a8a52d0c 100644 --- a/notifications/endpoint.go +++ b/notifications/endpoint.go @@ -1,9 +1,10 @@ package notifications import ( - "github.com/docker/distribution/configuration" "net/http" "time" + + "github.com/docker/distribution/configuration" ) // EndpointConfig covers the optional configuration parameters for an active diff --git a/registry/storage/driver/base/base.go b/registry/storage/driver/base/base.go index ab55b022..4511f002 100644 --- a/registry/storage/driver/base/base.go +++ b/registry/storage/driver/base/base.go @@ -40,12 +40,12 @@ package base import ( "context" "io" + "time" dcontext "github.com/docker/distribution/context" prometheus "github.com/docker/distribution/metrics" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/go-metrics" - "time" ) var ( diff --git a/registry/storage/driver/middleware/cloudfront/s3filter_test.go b/registry/storage/driver/middleware/cloudfront/s3filter_test.go index 6aca3abc..7ff60fda 100644 --- a/registry/storage/driver/middleware/cloudfront/s3filter_test.go +++ b/registry/storage/driver/middleware/cloudfront/s3filter_test.go @@ -5,13 +5,14 @@ import ( "crypto/rand" "encoding/json" "fmt" - dcontext "github.com/docker/distribution/context" "net" "net/http" "net/http/httptest" "testing" "time" + dcontext "github.com/docker/distribution/context" + "reflect" // used as a replacement for testify ) From db0a4ec1c80420d0af350574e45b74e891bfe63b Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Mon, 6 Aug 2018 14:34:15 -0700 Subject: [PATCH 132/276] Enable static checks Signed-off-by: Derek McGowan --- .gometalinter.json | 2 ++ digestset/set_test.go | 6 +++--- health/health.go | 4 ++-- manifest/schema1/config_builder_test.go | 2 +- manifest/schema2/manifest_test.go | 2 +- notifications/sinks_test.go | 15 ++++++++++--- registry/auth/htpasswd/htpasswd.go | 2 +- registry/client/transport/http_reader.go | 9 ++++---- registry/handlers/api_test.go | 10 ++++----- registry/handlers/mail.go | 2 +- registry/proxy/proxyblobstore.go | 2 +- registry/proxy/proxyblobstore_test.go | 13 ++++++++---- registry/proxy/proxymanifeststore.go | 2 +- registry/storage/blob_test.go | 21 +++++++++---------- registry/storage/blobwriter_resumable.go | 6 +++--- registry/storage/cache/cachecheck/suite.go | 4 ++-- registry/storage/catalog_test.go | 4 ++-- 
registry/storage/driver/azure/azure.go | 4 ++-- registry/storage/driver/filesystem/driver.go | 8 +++---- .../middleware/cloudfront/middleware.go | 2 +- .../middleware/redirect/middleware_test.go | 5 +++-- registry/storage/driver/s3-aws/s3.go | 18 +++++++++++----- registry/storage/driver/s3-aws/s3_test.go | 4 ++-- .../storage/driver/s3-aws/s3_v2_signer.go | 4 ++-- registry/storage/driver/s3-goamz/s3_test.go | 4 ++-- registry/storage/driver/swift/swift_test.go | 8 +++---- .../storage/driver/testsuites/testsuites.go | 6 +++--- registry/storage/filereader.go | 13 ++++++------ registry/storage/filereader_test.go | 13 ++++++------ registry/storage/garbagecollect_test.go | 10 ++++----- registry/storage/purgeuploads.go | 2 +- registry/storage/tagstore.go | 2 +- testutil/tarfile.go | 3 +-- 33 files changed, 116 insertions(+), 96 deletions(-) diff --git a/.gometalinter.json b/.gometalinter.json index 0ceaa139..4eb018ff 100644 --- a/.gometalinter.json +++ b/.gometalinter.json @@ -5,6 +5,8 @@ "EnableGC": true, "Enable": [ "structcheck", + "staticcheck", + "unconvert", "gofmt", "golint", diff --git a/digestset/set_test.go b/digestset/set_test.go index 67d46c62..5f6d09d9 100644 --- a/digestset/set_test.go +++ b/digestset/set_test.go @@ -41,7 +41,7 @@ func TestLookup(t *testing.T) { } assertEqualDigests(t, dgst, digests[3]) - dgst, err = dset.Lookup("1234") + _, err = dset.Lookup("1234") if err == nil { t.Fatal("Expected ambiguous error looking up: 1234") } @@ -49,7 +49,7 @@ func TestLookup(t *testing.T) { t.Fatal(err) } - dgst, err = dset.Lookup("9876") + _, err = dset.Lookup("9876") if err == nil { t.Fatal("Expected not found error looking up: 9876") } @@ -57,7 +57,7 @@ func TestLookup(t *testing.T) { t.Fatal(err) } - dgst, err = dset.Lookup("sha256:1234") + _, err = dset.Lookup("sha256:1234") if err == nil { t.Fatal("Expected ambiguous error looking up: sha256:1234") } diff --git a/health/health.go b/health/health.go index 220282dc..592bce57 100644 --- a/health/health.go +++ b/health/health.go @@ -215,7 +215,7 @@ func RegisterFunc(name string, check func() error) { // RegisterPeriodicFunc allows the convenience of registering a PeriodicChecker // from an arbitrary func() error. func (registry *Registry) RegisterPeriodicFunc(name string, period time.Duration, check CheckFunc) { - registry.Register(name, PeriodicChecker(CheckFunc(check), period)) + registry.Register(name, PeriodicChecker(check, period)) } // RegisterPeriodicFunc allows the convenience of registering a PeriodicChecker @@ -227,7 +227,7 @@ func RegisterPeriodicFunc(name string, period time.Duration, check CheckFunc) { // RegisterPeriodicThresholdFunc allows the convenience of registering a // PeriodicChecker from an arbitrary func() error. 
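// For example (illustrative only; "storage" and checkStorage are hypothetical
// names, not part of this change):
//   registry.RegisterPeriodicThresholdFunc("storage", 10*time.Second, 3, checkStorage)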
func (registry *Registry) RegisterPeriodicThresholdFunc(name string, period time.Duration, threshold int, check CheckFunc) { - registry.Register(name, PeriodicThresholdChecker(CheckFunc(check), period, threshold)) + registry.Register(name, PeriodicThresholdChecker(check, period, threshold)) } // RegisterPeriodicThresholdFunc allows the convenience of registering a diff --git a/manifest/schema1/config_builder_test.go b/manifest/schema1/config_builder_test.go index 360febd7..3e913a23 100644 --- a/manifest/schema1/config_builder_test.go +++ b/manifest/schema1/config_builder_test.go @@ -59,7 +59,7 @@ func TestEmptyTar(t *testing.T) { if err != nil { t.Fatalf("NewReader returned error: %v", err) } - n, err := gzipReader.Read(decompressed[:]) + n, _ := gzipReader.Read(decompressed[:]) if n != 1024 { t.Fatalf("read returned %d bytes; expected 1024", n) } diff --git a/manifest/schema2/manifest_test.go b/manifest/schema2/manifest_test.go index 912dda9a..45f7f7d0 100644 --- a/manifest/schema2/manifest_test.go +++ b/manifest/schema2/manifest_test.go @@ -56,7 +56,7 @@ func TestManifest(t *testing.T) { t.Fatalf("error creating DeserializedManifest: %v", err) } - mediaType, canonical, err := deserialized.Payload() + mediaType, canonical, _ := deserialized.Payload() if mediaType != MediaTypeManifest { t.Fatalf("unexpected media type: %s", mediaType) diff --git a/notifications/sinks_test.go b/notifications/sinks_test.go index 9613fe8a..06f88b2c 100644 --- a/notifications/sinks_test.go +++ b/notifications/sinks_test.go @@ -31,7 +31,7 @@ func TestBroadcaster(t *testing.T) { wg.Add(1) go func(block ...Event) { if err := b.Write(block...); err != nil { - t.Fatalf("error writing block of length %d: %v", len(block), err) + t.Errorf("error writing block of length %d: %v", len(block), err) } wg.Done() }(block...) @@ -41,6 +41,9 @@ func TestBroadcaster(t *testing.T) { } wg.Wait() // Wait until writes complete + if t.Failed() { + t.FailNow() + } checkClose(t, b) // Iterate through the sinks and check that they all have the expected length. @@ -79,7 +82,7 @@ func TestEventQueue(t *testing.T) { wg.Add(1) go func(block ...Event) { if err := eq.Write(block...); err != nil { - t.Fatalf("error writing event block: %v", err) + t.Errorf("error writing event block: %v", err) } wg.Done() }(block...) @@ -89,6 +92,9 @@ func TestEventQueue(t *testing.T) { } wg.Wait() + if t.Failed() { + t.FailNow() + } checkClose(t, eq) ts.mu.Lock() @@ -177,7 +183,7 @@ func TestRetryingSink(t *testing.T) { go func(block ...Event) { defer wg.Done() if err := s.Write(block...); err != nil { - t.Fatalf("error writing event block: %v", err) + t.Errorf("error writing event block: %v", err) } }(block...) 
@@ -186,6 +192,9 @@ func TestRetryingSink(t *testing.T) { } wg.Wait() + if t.Failed() { + t.FailNow() + } checkClose(t, s) ts.mu.Lock() diff --git a/registry/auth/htpasswd/htpasswd.go b/registry/auth/htpasswd/htpasswd.go index b10b256c..83205160 100644 --- a/registry/auth/htpasswd/htpasswd.go +++ b/registry/auth/htpasswd/htpasswd.go @@ -38,7 +38,7 @@ func (htpasswd *htpasswd) authenticateUser(username string, password string) err return auth.ErrAuthenticationFailure } - err := bcrypt.CompareHashAndPassword([]byte(credentials), []byte(password)) + err := bcrypt.CompareHashAndPassword(credentials, []byte(password)) if err != nil { return auth.ErrAuthenticationFailure } diff --git a/registry/client/transport/http_reader.go b/registry/client/transport/http_reader.go index e5ff09d7..1d0b382f 100644 --- a/registry/client/transport/http_reader.go +++ b/registry/client/transport/http_reader.go @@ -5,7 +5,6 @@ import ( "fmt" "io" "net/http" - "os" "regexp" "strconv" ) @@ -97,7 +96,7 @@ func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) { lastReaderOffset := hrs.readerOffset - if whence == os.SEEK_SET && hrs.rc == nil { + if whence == io.SeekStart && hrs.rc == nil { // If no request has been made yet, and we are seeking to an // absolute position, set the read offset as well to avoid an // unnecessary request. @@ -113,14 +112,14 @@ func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) { newOffset := hrs.seekOffset switch whence { - case os.SEEK_CUR: + case io.SeekCurrent: newOffset += offset - case os.SEEK_END: + case io.SeekEnd: if hrs.size < 0 { return 0, errors.New("content length not known") } newOffset = hrs.size + offset - case os.SEEK_SET: + case io.SeekStart: newOffset = offset } diff --git a/registry/handlers/api_test.go b/registry/handlers/api_test.go index 446f1b83..d550d124 100644 --- a/registry/handlers/api_test.go +++ b/registry/handlers/api_test.go @@ -512,8 +512,8 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv { // ------------------------------------------ // Now, actually do successful upload. 
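// (The os.SEEK_* constants have been deprecated since Go 1.7 in favor of
// io.SeekStart, io.SeekCurrent and io.SeekEnd, hence the mechanical rewrites
// in this hunk and the ones that follow.)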
- layerLength, _ := layerFile.Seek(0, os.SEEK_END) - layerFile.Seek(0, os.SEEK_SET) + layerLength, _ := layerFile.Seek(0, io.SeekEnd) + layerFile.Seek(0, io.SeekStart) uploadURLBase, _ = startPushLayer(t, env, imageName) pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile) @@ -674,12 +674,12 @@ func testBlobDelete(t *testing.T, env *testEnv, args blobArgs) { // ---------------- // Reupload previously deleted blob - layerFile.Seek(0, os.SEEK_SET) + layerFile.Seek(0, io.SeekStart) uploadURLBase, _ := startPushLayer(t, env, imageName) pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile) - layerFile.Seek(0, os.SEEK_SET) + layerFile.Seek(0, io.SeekStart) canonicalDigester := digest.Canonical.Digester() if _, err := io.Copy(canonicalDigester.Hash(), layerFile); err != nil { t.Fatalf("error copying to digest: %v", err) @@ -693,7 +693,7 @@ func testBlobDelete(t *testing.T, env *testEnv, args blobArgs) { t.Fatalf("unexpected error checking head on existing layer: %v", err) } - layerLength, _ := layerFile.Seek(0, os.SEEK_END) + layerLength, _ := layerFile.Seek(0, io.SeekEnd) checkResponse(t, "checking head on reuploaded layer", resp, http.StatusOK) checkHeaders(t, resp, http.Header{ "Content-Length": []string{fmt.Sprint(layerLength)}, diff --git a/registry/handlers/mail.go b/registry/handlers/mail.go index 39244909..9e884265 100644 --- a/registry/handlers/mail.go +++ b/registry/handlers/mail.go @@ -36,7 +36,7 @@ func (mail *mailer) sendMail(subject, message string) error { auth, mail.From, mail.To, - []byte(msg), + msg, ) if err != nil { return err diff --git a/registry/proxy/proxyblobstore.go b/registry/proxy/proxyblobstore.go index 0f2c7e39..fc59552b 100644 --- a/registry/proxy/proxyblobstore.go +++ b/registry/proxy/proxyblobstore.go @@ -16,7 +16,7 @@ import ( ) // todo(richardscothern): from cache control header or config file -const blobTTL = time.Duration(24 * 7 * time.Hour) +const blobTTL = 24 * 7 * time.Hour type proxyBlobStore struct { localStore distribution.BlobStore diff --git a/registry/proxy/proxyblobstore_test.go b/registry/proxy/proxyblobstore_test.go index 9bee48d8..4bf4ea8a 100644 --- a/registry/proxy/proxyblobstore_test.go +++ b/registry/proxy/proxyblobstore_test.go @@ -350,24 +350,30 @@ func testProxyStoreServe(t *testing.T, te *testEnv, numClients int) { w := httptest.NewRecorder() r, err := http.NewRequest("GET", "", nil) if err != nil { - t.Fatal(err) + t.Error(err) + return } err = te.store.ServeBlob(te.ctx, w, r, remoteBlob.Digest) if err != nil { - t.Fatalf(err.Error()) + t.Errorf(err.Error()) + return } bodyBytes := w.Body.Bytes() localDigest := digest.FromBytes(bodyBytes) if localDigest != remoteBlob.Digest { - t.Fatalf("Mismatching blob fetch from proxy") + t.Errorf("Mismatching blob fetch from proxy") + return } } }() } wg.Wait() + if t.Failed() { + t.FailNow() + } remoteBlobCount := len(te.inRemote) sbsMu.Lock() @@ -404,7 +410,6 @@ func testProxyStoreServe(t *testing.T, te *testEnv, numClients int) { } } - localStats = te.LocalStats() remoteStats = te.RemoteStats() // Ensure remote unchanged diff --git a/registry/proxy/proxymanifeststore.go b/registry/proxy/proxymanifeststore.go index 5c7264d9..9b017a0b 100644 --- a/registry/proxy/proxymanifeststore.go +++ b/registry/proxy/proxymanifeststore.go @@ -12,7 +12,7 @@ import ( ) // todo(richardscothern): from cache control header or config -const repositoryTTL = time.Duration(24 * 7 * time.Hour) +const repositoryTTL = 24 * 7 * time.Hour type proxyManifestStore struct { ctx 
context.Context diff --git a/registry/storage/blob_test.go b/registry/storage/blob_test.go index fc027f4e..cce232a7 100644 --- a/registry/storage/blob_test.go +++ b/registry/storage/blob_test.go @@ -7,7 +7,6 @@ import ( "fmt" "io" "io/ioutil" - "os" "path" "reflect" "testing" @@ -96,7 +95,7 @@ func TestSimpleBlobUpload(t *testing.T) { } // Do a resume, get unknown upload - blobUpload, err = bs.Resume(ctx, blobUpload.ID()) + _, err = bs.Resume(ctx, blobUpload.ID()) if err != distribution.ErrBlobUploadUnknown { t.Fatalf("unexpected error resuming upload, should be unknown: %v", err) } @@ -278,7 +277,7 @@ func TestSimpleBlobRead(t *testing.T) { t.Fatalf("expected not found error when testing for existence: %v", err) } - rc, err := bs.Open(ctx, dgst) + _, err = bs.Open(ctx, dgst) if err != distribution.ErrBlobUnknown { t.Fatalf("expected not found error when opening non-existent blob: %v", err) } @@ -300,7 +299,7 @@ func TestSimpleBlobRead(t *testing.T) { t.Fatalf("committed blob has incorrect length: %v != %v", desc.Size, randomLayerSize) } - rc, err = bs.Open(ctx, desc.Digest) // note that we are opening with original digest. + rc, err := bs.Open(ctx, desc.Digest) // note that we are opening with original digest. if err != nil { t.Fatalf("error opening blob with %v: %v", dgst, err) } @@ -323,7 +322,7 @@ func TestSimpleBlobRead(t *testing.T) { } // Now seek back the blob, read the whole thing and check against randomLayerData - offset, err := rc.Seek(0, os.SEEK_SET) + offset, err := rc.Seek(0, io.SeekStart) if err != nil { t.Fatalf("error seeking blob: %v", err) } @@ -342,7 +341,7 @@ func TestSimpleBlobRead(t *testing.T) { } // Reset the randomLayerReader and read back the buffer - _, err = randomLayerReader.Seek(0, os.SEEK_SET) + _, err = randomLayerReader.Seek(0, io.SeekStart) if err != nil { t.Fatalf("error resetting layer reader: %v", err) } @@ -397,7 +396,7 @@ func TestBlobMount(t *testing.T) { t.Fatalf("error getting seeker size of random data: %v", err) } - nn, err := io.Copy(blobUpload, randomDataReader) + _, err = io.Copy(blobUpload, randomDataReader) if err != nil { t.Fatalf("unexpected error uploading layer data: %v", err) } @@ -460,7 +459,7 @@ func TestBlobMount(t *testing.T) { defer rc.Close() h := sha256.New() - nn, err = io.Copy(h, rc) + nn, err := io.Copy(h, rc) if err != nil { t.Fatalf("error reading layer: %v", err) } @@ -573,17 +572,17 @@ func simpleUpload(t *testing.T, bs distribution.BlobIngester, blob []byte, expec // the original state, returning the size. The state of the seeker should be // treated as unknown if an error is returned. func seekerSize(seeker io.ReadSeeker) (int64, error) { - current, err := seeker.Seek(0, os.SEEK_CUR) + current, err := seeker.Seek(0, io.SeekCurrent) if err != nil { return 0, err } - end, err := seeker.Seek(0, os.SEEK_END) + end, err := seeker.Seek(0, io.SeekEnd) if err != nil { return 0, err } - resumed, err := seeker.Seek(current, os.SEEK_SET) + resumed, err := seeker.Seek(current, io.SeekStart) if err != nil { return 0, err } diff --git a/registry/storage/blobwriter_resumable.go b/registry/storage/blobwriter_resumable.go index 8c909169..b700982a 100644 --- a/registry/storage/blobwriter_resumable.go +++ b/registry/storage/blobwriter_resumable.go @@ -29,7 +29,7 @@ func (bw *blobWriter) resumeDigest(ctx context.Context) error { return errResumableDigestNotAvailable } offset := bw.fileWriter.Size() - if offset == int64(h.Len()) { + if offset == h.Len() { // State of digester is already at the requested offset. 
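// (The resumable hash's Len method is assumed here to already return int64,
// which is why the int64 conversions removed in this hunk were redundant
// casts flagged by unconvert.)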
return nil } @@ -65,7 +65,7 @@ func (bw *blobWriter) resumeDigest(ctx context.Context) error { } // Mind the gap. - if gapLen := offset - int64(h.Len()); gapLen > 0 { + if gapLen := offset - h.Len(); gapLen > 0 { return errResumableDigestNotAvailable } @@ -129,7 +129,7 @@ func (bw *blobWriter) storeHashState(ctx context.Context) error { name: bw.blobStore.repository.Named().String(), id: bw.id, alg: bw.digester.Digest().Algorithm(), - offset: int64(h.Len()), + offset: h.Len(), }) if err != nil { diff --git a/registry/storage/cache/cachecheck/suite.go b/registry/storage/cache/cachecheck/suite.go index b23765e8..0b8b1e9e 100644 --- a/registry/storage/cache/cachecheck/suite.go +++ b/registry/storage/cache/cachecheck/suite.go @@ -26,12 +26,12 @@ func checkBlobDescriptorCacheEmptyRepository(ctx context.Context, t *testing.T, t.Fatalf("expected unknown blob error with empty store: %v", err) } - cache, err := provider.RepositoryScoped("") + _, err := provider.RepositoryScoped("") if err == nil { t.Fatalf("expected an error when asking for invalid repo") } - cache, err = provider.RepositoryScoped("foo/bar") + cache, err := provider.RepositoryScoped("foo/bar") if err != nil { t.Fatalf("unexpected error getting repository: %v", err) } diff --git a/registry/storage/catalog_test.go b/registry/storage/catalog_test.go index 696e024e..c38b252e 100644 --- a/registry/storage/catalog_test.go +++ b/registry/storage/catalog_test.go @@ -273,7 +273,7 @@ func BenchmarkPathCompareNative(B *testing.B) { for i := 0; i < B.N; i++ { c := a < b - c = c && false + _ = c && false } } @@ -285,7 +285,7 @@ func BenchmarkPathCompareNativeEqual(B *testing.B) { for i := 0; i < B.N; i++ { c := a < b - c = c && false + _ = c && false } } diff --git a/registry/storage/driver/azure/azure.go b/registry/storage/driver/azure/azure.go index 993f43c4..906e7792 100644 --- a/registry/storage/driver/azure/azure.go +++ b/registry/storage/driver/azure/azure.go @@ -169,7 +169,7 @@ func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.Read return nil, err } info := blobRef.Properties - size := int64(info.ContentLength) + size := info.ContentLength if offset >= size { return ioutil.NopCloser(bytes.NewReader(nil)), nil } @@ -238,7 +238,7 @@ func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, return storagedriver.FileInfoInternal{FileInfoFields: storagedriver.FileInfoFields{ Path: path, - Size: int64(blobProperties.ContentLength), + Size: blobProperties.ContentLength, ModTime: time.Time(blobProperties.LastModified), IsDir: false, }}, nil diff --git a/registry/storage/driver/filesystem/driver.go b/registry/storage/driver/filesystem/driver.go index 9a9c062a..9036536b 100644 --- a/registry/storage/driver/filesystem/driver.go +++ b/registry/storage/driver/filesystem/driver.go @@ -184,11 +184,11 @@ func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.Read return nil, err } - seekPos, err := file.Seek(int64(offset), os.SEEK_SET) + seekPos, err := file.Seek(offset, io.SeekStart) if err != nil { file.Close() return nil, err - } else if seekPos < int64(offset) { + } else if seekPos < offset { file.Close() return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} } @@ -217,12 +217,12 @@ func (d *driver) Writer(ctx context.Context, subPath string, append bool) (stora return nil, err } } else { - n, err := fp.Seek(0, os.SEEK_END) + n, err := fp.Seek(0, io.SeekEnd) if err != nil { fp.Close() return nil, err } - offset = int64(n) + offset = n } return newFileWriter(fp, 
offset), nil diff --git a/registry/storage/driver/middleware/cloudfront/middleware.go b/registry/storage/driver/middleware/cloudfront/middleware.go index 5dc3ee41..83a36a72 100644 --- a/registry/storage/driver/middleware/cloudfront/middleware.go +++ b/registry/storage/driver/middleware/cloudfront/middleware.go @@ -86,7 +86,7 @@ func newCloudFrontStorageMiddleware(storageDriver storagedriver.StorageDriver, o return nil, fmt.Errorf("failed to read privatekey file: %s", err) } - block, _ := pem.Decode([]byte(pkBytes)) + block, _ := pem.Decode(pkBytes) if block == nil { return nil, fmt.Errorf("failed to decode private key as an rsa private key") } diff --git a/registry/storage/driver/middleware/redirect/middleware_test.go b/registry/storage/driver/middleware/redirect/middleware_test.go index 1eb6309f..36f08c28 100644 --- a/registry/storage/driver/middleware/redirect/middleware_test.go +++ b/registry/storage/driver/middleware/redirect/middleware_test.go @@ -1,6 +1,7 @@ package middleware import ( + "context" "testing" check "gopkg.in/check.v1" @@ -36,7 +37,7 @@ func (s *MiddlewareSuite) TestHttpsPort(c *check.C) { c.Assert(m.scheme, check.Equals, "https") c.Assert(m.host, check.Equals, "example.com:5443") - url, err := middleware.URLFor(nil, "/rick/data", nil) + url, err := middleware.URLFor(context.TODO(), "/rick/data", nil) c.Assert(err, check.Equals, nil) c.Assert(url, check.Equals, "https://example.com:5443/rick/data") } @@ -52,7 +53,7 @@ func (s *MiddlewareSuite) TestHTTP(c *check.C) { c.Assert(m.scheme, check.Equals, "http") c.Assert(m.host, check.Equals, "example.com") - url, err := middleware.URLFor(nil, "morty/data", nil) + url, err := middleware.URLFor(context.TODO(), "morty/data", nil) c.Assert(err, check.Equals, nil) c.Assert(url, check.Equals, "http://example.com/morty/data") } diff --git a/registry/storage/driver/s3-aws/s3.go b/registry/storage/driver/s3-aws/s3.go index 0c6f7e5e..4bd80b3f 100644 --- a/registry/storage/driver/s3-aws/s3.go +++ b/registry/storage/driver/s3-aws/s3.go @@ -197,14 +197,14 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { regionEndpoint = "" } - regionName, ok := parameters["region"] + regionName := parameters["region"] if regionName == nil || fmt.Sprint(regionName) == "" { return nil, fmt.Errorf("No region parameter provided") } region := fmt.Sprint(regionName) // Don't check the region value if a custom endpoint is provided. 
if regionEndpoint == "" { - if _, ok = validRegions[region]; !ok { + if _, ok := validRegions[region]; !ok { return nil, fmt.Errorf("Invalid region provided: %v", region) } } @@ -398,6 +398,10 @@ func New(params DriverParameters) (*Driver, error) { } awsConfig := aws.NewConfig() + sess, err := session.NewSession() + if err != nil { + return nil, fmt.Errorf("failed to create new session: %v", err) + } creds := credentials.NewChainCredentials([]credentials.Provider{ &credentials.StaticProvider{ Value: credentials.Value{ @@ -408,7 +412,7 @@ func New(params DriverParameters) (*Driver, error) { }, &credentials.EnvProvider{}, &credentials.SharedCredentialsProvider{}, - &ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(session.New())}, + &ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(sess)}, }) if params.RegionEndpoint != "" { @@ -426,7 +430,11 @@ func New(params DriverParameters) (*Driver, error) { }) } - s3obj := s3.New(session.New(awsConfig)) + sess, err = session.NewSession(awsConfig) + if err != nil { + return nil, fmt.Errorf("failed to create new session with aws config: %v", err) + } + s3obj := s3.New(sess) // enable S3 compatible signature v2 signing instead if !params.V4Auth { @@ -1150,10 +1158,10 @@ func (w *writer) Write(p []byte) (int, error) { Bucket: aws.String(w.driver.Bucket), Key: aws.String(w.key), }) - defer resp.Body.Close() if err != nil { return 0, err } + defer resp.Body.Close() w.parts = nil w.readyPart, err = ioutil.ReadAll(resp.Body) if err != nil { diff --git a/registry/storage/driver/s3-aws/s3_test.go b/registry/storage/driver/s3-aws/s3_test.go index 363a22eb..ee0b66d8 100644 --- a/registry/storage/driver/s3-aws/s3_test.go +++ b/registry/storage/driver/s3-aws/s3_test.go @@ -139,14 +139,14 @@ func TestEmptyRootList(t *testing.T) { } defer rootedDriver.Delete(ctx, filename) - keys, err := emptyRootDriver.List(ctx, "/") + keys, _ := emptyRootDriver.List(ctx, "/") for _, path := range keys { if !storagedriver.PathRegexp.MatchString(path) { t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) } } - keys, err = slashRootDriver.List(ctx, "/") + keys, _ = slashRootDriver.List(ctx, "/") for _, path := range keys { if !storagedriver.PathRegexp.MatchString(path) { t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) diff --git a/registry/storage/driver/s3-aws/s3_v2_signer.go b/registry/storage/driver/s3-aws/s3_v2_signer.go index 703660cf..29688ae5 100644 --- a/registry/storage/driver/s3-aws/s3_v2_signer.go +++ b/registry/storage/driver/s3-aws/s3_v2_signer.go @@ -209,9 +209,9 @@ func (v2 *signer) Sign() error { v2.signature = base64.StdEncoding.EncodeToString(hash.Sum(nil)) if expires { - params["Signature"] = []string{string(v2.signature)} + params["Signature"] = []string{v2.signature} } else { - headers["Authorization"] = []string{"AWS " + accessKey + ":" + string(v2.signature)} + headers["Authorization"] = []string{"AWS " + accessKey + ":" + v2.signature} } log.WithFields(log.Fields{ diff --git a/registry/storage/driver/s3-goamz/s3_test.go b/registry/storage/driver/s3-goamz/s3_test.go index 352ec3f5..34bfb5d0 100644 --- a/registry/storage/driver/s3-goamz/s3_test.go +++ b/registry/storage/driver/s3-goamz/s3_test.go @@ -125,14 +125,14 @@ func TestEmptyRootList(t *testing.T) { } defer rootedDriver.Delete(ctx, filename) - keys, err := emptyRootDriver.List(ctx, "/") + keys, _ := emptyRootDriver.List(ctx, "/") for _, path := range keys { if !storagedriver.PathRegexp.MatchString(path) { t.Fatalf("unexpected string in 
path: %q != %q", path, storagedriver.PathRegexp) } } - keys, err = slashRootDriver.List(ctx, "/") + keys, _ = slashRootDriver.List(ctx, "/") for _, path := range keys { if !storagedriver.PathRegexp.MatchString(path) { t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) diff --git a/registry/storage/driver/swift/swift_test.go b/registry/storage/driver/swift/swift_test.go index dcd5e4ff..00288cba 100644 --- a/registry/storage/driver/swift/swift_test.go +++ b/registry/storage/driver/swift/swift_test.go @@ -148,14 +148,14 @@ func TestEmptyRootList(t *testing.T) { t.Fatalf("unexpected error creating content: %v", err) } - keys, err := emptyRootDriver.List(ctx, "/") + keys, _ := emptyRootDriver.List(ctx, "/") for _, path := range keys { if !storagedriver.PathRegexp.MatchString(path) { t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) } } - keys, err = slashRootDriver.List(ctx, "/") + keys, _ = slashRootDriver.List(ctx, "/") for _, path := range keys { if !storagedriver.PathRegexp.MatchString(path) { t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) @@ -234,11 +234,11 @@ func TestFilenameChunking(t *testing.T) { } // Test 0 and < 0 sizes - actual, err = chunkFilenames(nil, 0) + _, err = chunkFilenames(nil, 0) if err == nil { t.Fatal("expected error for size = 0") } - actual, err = chunkFilenames(nil, -1) + _, err = chunkFilenames(nil, -1) if err == nil { t.Fatal("expected error for size = -1") } diff --git a/registry/storage/driver/testsuites/testsuites.go b/registry/storage/driver/testsuites/testsuites.go index 4f2f1ab0..5e37c5f3 100644 --- a/registry/storage/driver/testsuites/testsuites.go +++ b/registry/storage/driver/testsuites/testsuites.go @@ -136,7 +136,7 @@ func (suite *DriverSuite) deletePath(c *check.C, path string) { err = nil } c.Assert(err, check.IsNil) - paths, err := suite.StorageDriver.List(suite.ctx, path) + paths, _ := suite.StorageDriver.List(suite.ctx, path) if len(paths) == 0 { break } @@ -651,7 +651,7 @@ func (suite *DriverSuite) TestURLFor(c *check.C) { } c.Assert(err, check.IsNil) - response, err = http.Head(url) + response, _ = http.Head(url) c.Assert(response.StatusCode, check.Equals, 200) c.Assert(response.ContentLength, check.Equals, int64(32)) } @@ -1116,7 +1116,7 @@ func (suite *DriverSuite) testFileStreams(c *check.C, size int64) { c.Assert(err, check.IsNil) tf.Sync() - tf.Seek(0, os.SEEK_SET) + tf.Seek(0, io.SeekStart) writer, err := suite.StorageDriver.Writer(suite.ctx, filename, false) c.Assert(err, check.IsNil) diff --git a/registry/storage/filereader.go b/registry/storage/filereader.go index 2050e43d..90afaad0 100644 --- a/registry/storage/filereader.go +++ b/registry/storage/filereader.go @@ -7,7 +7,6 @@ import ( "fmt" "io" "io/ioutil" - "os" storagedriver "github.com/docker/distribution/registry/storage/driver" ) @@ -81,12 +80,12 @@ func (fr *fileReader) Seek(offset int64, whence int) (int64, error) { newOffset := fr.offset switch whence { - case os.SEEK_CUR: - newOffset += int64(offset) - case os.SEEK_END: - newOffset = fr.size + int64(offset) - case os.SEEK_SET: - newOffset = int64(offset) + case io.SeekCurrent: + newOffset += offset + case io.SeekEnd: + newOffset = fr.size + offset + case io.SeekStart: + newOffset = offset } if newOffset < 0 { diff --git a/registry/storage/filereader_test.go b/registry/storage/filereader_test.go index e522d605..305366f4 100644 --- a/registry/storage/filereader_test.go +++ b/registry/storage/filereader_test.go @@ -4,7 +4,6 @@ import ( 
"bytes" "io" mrand "math/rand" - "os" "testing" "github.com/docker/distribution/context" @@ -72,7 +71,7 @@ func TestFileReaderSeek(t *testing.T) { for _, repitition := range mrand.Perm(repititions - 1) { targetOffset := int64(len(pattern) * repitition) // Seek to a multiple of pattern size and read pattern size bytes - offset, err := fr.Seek(targetOffset, os.SEEK_SET) + offset, err := fr.Seek(targetOffset, io.SeekStart) if err != nil { t.Fatalf("unexpected error seeking: %v", err) } @@ -97,7 +96,7 @@ func TestFileReaderSeek(t *testing.T) { } // Check offset - current, err := fr.Seek(0, os.SEEK_CUR) + current, err := fr.Seek(0, io.SeekCurrent) if err != nil { t.Fatalf("error checking current offset: %v", err) } @@ -107,7 +106,7 @@ func TestFileReaderSeek(t *testing.T) { } } - start, err := fr.Seek(0, os.SEEK_SET) + start, err := fr.Seek(0, io.SeekStart) if err != nil { t.Fatalf("error seeking to start: %v", err) } @@ -116,7 +115,7 @@ func TestFileReaderSeek(t *testing.T) { t.Fatalf("expected to seek to start: %v != 0", start) } - end, err := fr.Seek(0, os.SEEK_END) + end, err := fr.Seek(0, io.SeekEnd) if err != nil { t.Fatalf("error checking current offset: %v", err) } @@ -128,13 +127,13 @@ func TestFileReaderSeek(t *testing.T) { // 4. Seek before start, ensure error. // seek before start - before, err := fr.Seek(-1, os.SEEK_SET) + before, err := fr.Seek(-1, io.SeekStart) if err == nil { t.Fatalf("error expected, returned offset=%v", before) } // 5. Seek after end, - after, err := fr.Seek(1, os.SEEK_END) + after, err := fr.Seek(1, io.SeekEnd) if err != nil { t.Fatalf("unexpected error expected, returned offset=%v", after) } diff --git a/registry/storage/garbagecollect_test.go b/registry/storage/garbagecollect_test.go index 1694f891..533ea254 100644 --- a/registry/storage/garbagecollect_test.go +++ b/registry/storage/garbagecollect_test.go @@ -164,7 +164,7 @@ func TestNoDeletionNoEffect(t *testing.T) { registry := createRegistry(t, inmemoryDriver) repo := makeRepository(t, registry, "palailogos") - manifestService, err := repo.Manifests(ctx) + manifestService, _ := repo.Manifests(ctx) image1 := uploadRandomSchema1Image(t, repo) image2 := uploadRandomSchema1Image(t, repo) @@ -206,7 +206,7 @@ func TestDeleteManifestIfTagNotFound(t *testing.T) { registry := createRegistry(t, inmemoryDriver) repo := makeRepository(t, registry, "deletemanifests") - manifestService, err := repo.Manifests(ctx) + manifestService, _ := repo.Manifests(ctx) // Create random layers randomLayers1, err := testutil.CreateRandomLayers(3) @@ -252,7 +252,7 @@ func TestDeleteManifestIfTagNotFound(t *testing.T) { } manifestEnumerator, _ := manifestService.(distribution.ManifestEnumerator) - err = manifestEnumerator.Enumerate(ctx, func(dgst digest.Digest) error { + manifestEnumerator.Enumerate(ctx, func(dgst digest.Digest) error { repo.Tags(ctx).Tag(ctx, "test", distribution.Descriptor{Digest: dgst}) return nil }) @@ -336,7 +336,7 @@ func TestDeletionHasEffect(t *testing.T) { registry := createRegistry(t, inmemoryDriver) repo := makeRepository(t, registry, "komnenos") - manifests, err := repo.Manifests(ctx) + manifests, _ := repo.Manifests(ctx) image1 := uploadRandomSchema1Image(t, repo) image2 := uploadRandomSchema1Image(t, repo) @@ -346,7 +346,7 @@ func TestDeletionHasEffect(t *testing.T) { manifests.Delete(ctx, image3.manifestDigest) // Run GC - err = MarkAndSweep(context.Background(), inmemoryDriver, registry, GCOpts{ + err := MarkAndSweep(context.Background(), inmemoryDriver, registry, GCOpts{ DryRun: false, RemoveUntagged: 
false, }) diff --git a/registry/storage/purgeuploads.go b/registry/storage/purgeuploads.go index a82107a7..7ebd1604 100644 --- a/registry/storage/purgeuploads.go +++ b/registry/storage/purgeuploads.go @@ -22,7 +22,7 @@ func newUploadData() uploadData { return uploadData{ containingDir: "", // default to far in future to protect against missing startedat - startedAt: time.Now().Add(time.Duration(10000 * time.Hour)), + startedAt: time.Now().Add(10000 * time.Hour), } } diff --git a/registry/storage/tagstore.go b/registry/storage/tagstore.go index ab5f54a6..f80b9628 100644 --- a/registry/storage/tagstore.go +++ b/registry/storage/tagstore.go @@ -179,7 +179,7 @@ func (ts *tagStore) Lookup(ctx context.Context, desc distribution.Descriptor) ([ tag: tag, } - tagLinkPath, err := pathFor(tagLinkPathSpec) + tagLinkPath, _ := pathFor(tagLinkPathSpec) tagDigest, err := ts.blobStore.readlink(ctx, tagLinkPath) if err != nil { switch err.(type) { diff --git a/testutil/tarfile.go b/testutil/tarfile.go index cb93602f..2ebd364a 100644 --- a/testutil/tarfile.go +++ b/testutil/tarfile.go @@ -88,8 +88,7 @@ func CreateRandomLayers(n int) (map[digest.Digest]io.ReadSeeker, error) { return nil, fmt.Errorf("unexpected error generating test layer file: %v", err) } - dgst := digest.Digest(ds) - digestMap[dgst] = rs + digestMap[ds] = rs } return digestMap, nil } From 9caa7a81bc66cd20f5746f110dc154922a47fde6 Mon Sep 17 00:00:00 2001 From: dmp Date: Fri, 10 Aug 2018 09:53:53 -0700 Subject: [PATCH 133/276] Remove goamz Signed-off-by: Olivier --- cmd/registry/main.go | 1 - registry/storage/driver/s3-goamz/s3.go | 766 ---------- registry/storage/driver/s3-goamz/s3_test.go | 201 --- vendor.conf | 1 - vendor/github.com/docker/goamz/LICENSE | 185 --- vendor/github.com/docker/goamz/README.md | 68 - vendor/github.com/docker/goamz/aws/attempt.go | 74 - vendor/github.com/docker/goamz/aws/aws.go | 636 -------- vendor/github.com/docker/goamz/aws/client.go | 124 -- vendor/github.com/docker/goamz/aws/regions.go | 289 ---- vendor/github.com/docker/goamz/aws/retry.go | 136 -- vendor/github.com/docker/goamz/aws/sign.go | 472 ------ .../github.com/docker/goamz/s3/lifecycle.go | 202 --- vendor/github.com/docker/goamz/s3/multi.go | 508 ------- vendor/github.com/docker/goamz/s3/s3.go | 1305 ----------------- vendor/github.com/docker/goamz/s3/sign.go | 120 -- 16 files changed, 5088 deletions(-) delete mode 100644 registry/storage/driver/s3-goamz/s3.go delete mode 100644 registry/storage/driver/s3-goamz/s3_test.go delete mode 100644 vendor/github.com/docker/goamz/LICENSE delete mode 100644 vendor/github.com/docker/goamz/README.md delete mode 100644 vendor/github.com/docker/goamz/aws/attempt.go delete mode 100644 vendor/github.com/docker/goamz/aws/aws.go delete mode 100644 vendor/github.com/docker/goamz/aws/client.go delete mode 100644 vendor/github.com/docker/goamz/aws/regions.go delete mode 100644 vendor/github.com/docker/goamz/aws/retry.go delete mode 100644 vendor/github.com/docker/goamz/aws/sign.go delete mode 100644 vendor/github.com/docker/goamz/s3/lifecycle.go delete mode 100644 vendor/github.com/docker/goamz/s3/multi.go delete mode 100644 vendor/github.com/docker/goamz/s3/s3.go delete mode 100644 vendor/github.com/docker/goamz/s3/sign.go diff --git a/cmd/registry/main.go b/cmd/registry/main.go index c077a0c1..5beaa563 100644 --- a/cmd/registry/main.go +++ b/cmd/registry/main.go @@ -16,7 +16,6 @@ import ( _ "github.com/docker/distribution/registry/storage/driver/middleware/redirect" _ 
"github.com/docker/distribution/registry/storage/driver/oss" _ "github.com/docker/distribution/registry/storage/driver/s3-aws" - _ "github.com/docker/distribution/registry/storage/driver/s3-goamz" _ "github.com/docker/distribution/registry/storage/driver/swift" ) diff --git a/registry/storage/driver/s3-goamz/s3.go b/registry/storage/driver/s3-goamz/s3.go deleted file mode 100644 index 4bee38d7..00000000 --- a/registry/storage/driver/s3-goamz/s3.go +++ /dev/null @@ -1,766 +0,0 @@ -// Package s3 provides a storagedriver.StorageDriver implementation to -// store blobs in Amazon S3 cloud storage. -// -// This package leverages the docker/goamz client library for interfacing with -// S3. It is intended to be deprecated in favor of the s3-aws driver -// implementation. -// -// Because S3 is a key, value store the Stat call does not support last modification -// time for directories (directories are an abstraction for key, value stores) -// -// Keep in mind that S3 guarantees only read-after-write consistency for new -// objects, but no read-after-update or list-after-write consistency. -package s3 - -import ( - "bytes" - "context" - "fmt" - "io" - "io/ioutil" - "net/http" - "reflect" - "strconv" - "strings" - "time" - - "github.com/docker/distribution/registry/client/transport" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/base" - "github.com/docker/distribution/registry/storage/driver/factory" - "github.com/docker/goamz/aws" - "github.com/docker/goamz/s3" -) - -const driverName = "s3goamz" - -// minChunkSize defines the minimum multipart upload chunk size -// S3 API requires multipart upload chunks to be at least 5MB -const minChunkSize = 5 << 20 - -const defaultChunkSize = 2 * minChunkSize - -// listMax is the largest amount of objects you can request from S3 in a list call -const listMax = 1000 - -//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set -type DriverParameters struct { - AccessKey string - SecretKey string - Bucket string - Region aws.Region - Encrypt bool - Secure bool - V4Auth bool - ChunkSize int64 - RootDirectory string - StorageClass s3.StorageClass - UserAgent string -} - -func init() { - factory.Register(driverName, &s3DriverFactory{}) -} - -// s3DriverFactory implements the factory.StorageDriverFactory interface -type s3DriverFactory struct{} - -func (factory *s3DriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { - return FromParameters(parameters) -} - -type driver struct { - S3 *s3.S3 - Bucket *s3.Bucket - ChunkSize int64 - Encrypt bool - RootDirectory string - StorageClass s3.StorageClass -} - -type baseEmbed struct { - base.Base -} - -// Driver is a storagedriver.StorageDriver implementation backed by Amazon S3 -// Objects are stored at absolute keys in the provided bucket. 
-type Driver struct { - baseEmbed -} - -// FromParameters constructs a new Driver with a given parameters map -// Required parameters: -// - accesskey -// - secretkey -// - region -// - bucket -// - encrypt -func FromParameters(parameters map[string]interface{}) (*Driver, error) { - // Providing no values for these is valid in case the user is authenticating - // with an IAM on an ec2 instance (in which case the instance credentials will - // be summoned when GetAuth is called) - accessKey := parameters["accesskey"] - if accessKey == nil { - accessKey = "" - } - - secretKey := parameters["secretkey"] - if secretKey == nil { - secretKey = "" - } - - regionName := parameters["region"] - if regionName == nil || fmt.Sprint(regionName) == "" { - return nil, fmt.Errorf("No region parameter provided") - } - region := aws.GetRegion(fmt.Sprint(regionName)) - if region.Name == "" { - return nil, fmt.Errorf("Invalid region provided: %v", region) - } - - bucket := parameters["bucket"] - if bucket == nil || fmt.Sprint(bucket) == "" { - return nil, fmt.Errorf("No bucket parameter provided") - } - - encryptBool := false - encrypt := parameters["encrypt"] - switch encrypt := encrypt.(type) { - case string: - b, err := strconv.ParseBool(encrypt) - if err != nil { - return nil, fmt.Errorf("The encrypt parameter should be a boolean") - } - encryptBool = b - case bool: - encryptBool = encrypt - case nil: - // do nothing - default: - return nil, fmt.Errorf("The encrypt parameter should be a boolean") - } - - secureBool := true - secure := parameters["secure"] - switch secure := secure.(type) { - case string: - b, err := strconv.ParseBool(secure) - if err != nil { - return nil, fmt.Errorf("The secure parameter should be a boolean") - } - secureBool = b - case bool: - secureBool = secure - case nil: - // do nothing - default: - return nil, fmt.Errorf("The secure parameter should be a boolean") - } - - v4AuthBool := false - v4Auth := parameters["v4auth"] - switch v4Auth := v4Auth.(type) { - case string: - b, err := strconv.ParseBool(v4Auth) - if err != nil { - return nil, fmt.Errorf("The v4auth parameter should be a boolean") - } - v4AuthBool = b - case bool: - v4AuthBool = v4Auth - case nil: - // do nothing - default: - return nil, fmt.Errorf("The v4auth parameter should be a boolean") - } - - chunkSize := int64(defaultChunkSize) - chunkSizeParam := parameters["chunksize"] - switch v := chunkSizeParam.(type) { - case string: - vv, err := strconv.ParseInt(v, 0, 64) - if err != nil { - return nil, fmt.Errorf("chunksize parameter must be an integer, %v invalid", chunkSizeParam) - } - chunkSize = vv - case int64: - chunkSize = v - case int, uint, int32, uint32, uint64: - chunkSize = reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int() - case nil: - // do nothing - default: - return nil, fmt.Errorf("invalid value for chunksize: %#v", chunkSizeParam) - } - - if chunkSize < minChunkSize { - return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", chunkSize, minChunkSize) - } - - rootDirectory := parameters["rootdirectory"] - if rootDirectory == nil { - rootDirectory = "" - } - - storageClass := s3.StandardStorage - storageClassParam := parameters["storageclass"] - if storageClassParam != nil { - storageClassString, ok := storageClassParam.(string) - if !ok { - return nil, fmt.Errorf("The storageclass parameter must be one of %v, %v invalid", []s3.StorageClass{s3.StandardStorage, s3.ReducedRedundancy}, storageClassParam) - } - // All valid storage class 
parameters are UPPERCASE, so be a bit more flexible here - storageClassCasted := s3.StorageClass(strings.ToUpper(storageClassString)) - if storageClassCasted != s3.StandardStorage && storageClassCasted != s3.ReducedRedundancy { - return nil, fmt.Errorf("The storageclass parameter must be one of %v, %v invalid", []s3.StorageClass{s3.StandardStorage, s3.ReducedRedundancy}, storageClassParam) - } - storageClass = storageClassCasted - } - - userAgent := parameters["useragent"] - if userAgent == nil { - userAgent = "" - } - - params := DriverParameters{ - fmt.Sprint(accessKey), - fmt.Sprint(secretKey), - fmt.Sprint(bucket), - region, - encryptBool, - secureBool, - v4AuthBool, - chunkSize, - fmt.Sprint(rootDirectory), - storageClass, - fmt.Sprint(userAgent), - } - - return New(params) -} - -// New constructs a new Driver with the given AWS credentials, region, encryption flag, and -// bucketName -func New(params DriverParameters) (*Driver, error) { - auth, err := aws.GetAuth(params.AccessKey, params.SecretKey, "", time.Time{}) - if err != nil { - return nil, fmt.Errorf("unable to resolve aws credentials, please ensure that 'accesskey' and 'secretkey' are properly set or the credentials are available in $HOME/.aws/credentials: %v", err) - } - - if !params.Secure { - params.Region.S3Endpoint = strings.Replace(params.Region.S3Endpoint, "https", "http", 1) - } - - s3obj := s3.New(auth, params.Region) - - if params.UserAgent != "" { - s3obj.Client = &http.Client{ - Transport: transport.NewTransport(http.DefaultTransport, - transport.NewHeaderRequestModifier(http.Header{ - http.CanonicalHeaderKey("User-Agent"): []string{params.UserAgent}, - }), - ), - } - } - - if params.V4Auth { - s3obj.Signature = aws.V4Signature - } else if mustV4Auth(params.Region.Name) { - return nil, fmt.Errorf("The %s region only works with v4 authentication", params.Region.Name) - } - - bucket := s3obj.Bucket(params.Bucket) - - // TODO Currently multipart uploads have no timestamps, so this would be unwise - // if you initiated a new s3driver while another one is running on the same bucket. - // multis, _, err := bucket.ListMulti("", "") - // if err != nil { - // return nil, err - // } - - // for _, multi := range multis { - // err := multi.Abort() - // //TODO appropriate to do this error checking? - // if err != nil { - // return nil, err - // } - // } - - d := &driver{ - S3: s3obj, - Bucket: bucket, - ChunkSize: params.ChunkSize, - Encrypt: params.Encrypt, - RootDirectory: params.RootDirectory, - StorageClass: params.StorageClass, - } - - return &Driver{ - baseEmbed: baseEmbed{ - Base: base.Base{ - StorageDriver: d, - }, - }, - }, nil -} - -// Implement the storagedriver.StorageDriver interface - -func (d *driver) Name() string { - return driverName -} - -// GetContent retrieves the content stored at "path" as a []byte. -func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { - content, err := d.Bucket.Get(d.s3Path(path)) - if err != nil { - return nil, parseError(path, err) - } - return content, nil -} - -// PutContent stores the []byte content at a location designated by "path". -func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { - return parseError(path, d.Bucket.Put(d.s3Path(path), contents, d.getContentType(), getPermissions(), d.getOptions())) -} - -// Reader retrieves an io.ReadCloser for the content stored at "path" with a -// given byte offset. 
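Offset handling is the subtle part of Reader: as the implementation below shows, an offset past the end of the object surfaces S3's InvalidRange error as an empty reader rather than as a failure. A small helper sketch against the generic StorageDriver interface (the helper name is illustrative):

```
package example

import (
	"context"
	"io/ioutil"

	storagedriver "github.com/docker/distribution/registry/storage/driver"
)

// readFrom returns the bytes of path starting at offset. An offset at or
// past the end of the object yields an empty slice, not an error.
func readFrom(ctx context.Context, d storagedriver.StorageDriver, path string, offset int64) ([]byte, error) {
	rc, err := d.Reader(ctx, path, offset)
	if err != nil {
		return nil, err
	}
	defer rc.Close()
	return ioutil.ReadAll(rc)
}
```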
-func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { - headers := make(http.Header) - headers.Add("Range", "bytes="+strconv.FormatInt(offset, 10)+"-") - - resp, err := d.Bucket.GetResponseWithHeaders(d.s3Path(path), headers) - if err != nil { - if s3Err, ok := err.(*s3.Error); ok && s3Err.Code == "InvalidRange" { - return ioutil.NopCloser(bytes.NewReader(nil)), nil - } - - return nil, parseError(path, err) - } - return resp.Body, nil -} - -// Writer returns a FileWriter which will store the content written to it -// at the location designated by "path" after the call to Commit. -func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) { - key := d.s3Path(path) - if !append { - // TODO (brianbland): cancel other uploads at this path - multi, err := d.Bucket.InitMulti(key, d.getContentType(), getPermissions(), d.getOptions()) - if err != nil { - return nil, err - } - return d.newWriter(key, multi, nil), nil - } - multis, _, err := d.Bucket.ListMulti(key, "") - if err != nil { - return nil, parseError(path, err) - } - for _, multi := range multis { - if key != multi.Key { - continue - } - parts, err := multi.ListParts() - if err != nil { - return nil, parseError(path, err) - } - var multiSize int64 - for _, part := range parts { - multiSize += part.Size - } - return d.newWriter(key, multi, parts), nil - } - return nil, storagedriver.PathNotFoundError{Path: path} -} - -// Stat retrieves the FileInfo for the given path, including the current size -// in bytes and the creation time. -func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { - listResponse, err := d.Bucket.List(d.s3Path(path), "", "", 1) - if err != nil { - return nil, err - } - - fi := storagedriver.FileInfoFields{ - Path: path, - } - - if len(listResponse.Contents) == 1 { - if listResponse.Contents[0].Key != d.s3Path(path) { - fi.IsDir = true - } else { - fi.IsDir = false - fi.Size = listResponse.Contents[0].Size - - timestamp, err := time.Parse(time.RFC3339Nano, listResponse.Contents[0].LastModified) - if err != nil { - return nil, err - } - fi.ModTime = timestamp - } - } else if len(listResponse.CommonPrefixes) == 1 { - fi.IsDir = true - } else { - return nil, storagedriver.PathNotFoundError{Path: path} - } - - return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil -} - -// List returns a list of the objects that are direct descendants of the given path. -func (d *driver) List(ctx context.Context, opath string) ([]string, error) { - path := opath - if path != "/" && path[len(path)-1] != '/' { - path = path + "/" - } - - // This is to cover for the cases when the rootDirectory of the driver is either "" or "/". 
- // In those cases, there is no root prefix to replace and we must actually add a "/" to all - // results in order to keep them as valid paths as recognized by storagedriver.PathRegexp - prefix := "" - if d.s3Path("") == "" { - prefix = "/" - } - - listResponse, err := d.Bucket.List(d.s3Path(path), "/", "", listMax) - if err != nil { - return nil, parseError(opath, err) - } - - files := []string{} - directories := []string{} - - for { - for _, key := range listResponse.Contents { - files = append(files, strings.Replace(key.Key, d.s3Path(""), prefix, 1)) - } - - for _, commonPrefix := range listResponse.CommonPrefixes { - directories = append(directories, strings.Replace(commonPrefix[0:len(commonPrefix)-1], d.s3Path(""), prefix, 1)) - } - - if !listResponse.IsTruncated { - break - } - - listResponse, err = d.Bucket.List(d.s3Path(path), "/", listResponse.NextMarker, listMax) - if err != nil { - return nil, err - } - } - - if opath != "/" { - if len(files) == 0 && len(directories) == 0 { - // Treat empty response as missing directory, since we don't actually - // have directories in s3. - return nil, storagedriver.PathNotFoundError{Path: opath} - } - } - - return append(files, directories...), nil -} - -// Move moves an object stored at sourcePath to destPath, removing the original -// object. -func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { - /* This is terrible, but aws doesn't have an actual move. */ - _, err := d.Bucket.PutCopy(d.s3Path(destPath), getPermissions(), - s3.CopyOptions{Options: d.getOptions(), ContentType: d.getContentType()}, d.Bucket.Name+"/"+d.s3Path(sourcePath)) - if err != nil { - return parseError(sourcePath, err) - } - - return d.Delete(ctx, sourcePath) -} - -// Delete recursively deletes all objects stored at "path" and its subpaths. -func (d *driver) Delete(ctx context.Context, path string) error { - s3Path := d.s3Path(path) - listResponse, err := d.Bucket.List(s3Path, "", "", listMax) - if err != nil || len(listResponse.Contents) == 0 { - return storagedriver.PathNotFoundError{Path: path} - } - - s3Objects := make([]s3.Object, listMax) - - for len(listResponse.Contents) > 0 { - numS3Objects := len(listResponse.Contents) - for index, key := range listResponse.Contents { - // Stop if we encounter a key that is not a subpath (so that deleting "/a" does not delete "/ab"). - if len(key.Key) > len(s3Path) && (key.Key)[len(s3Path)] != '/' { - numS3Objects = index - break - } - s3Objects[index].Key = key.Key - } - - err := d.Bucket.DelMulti(s3.Delete{Quiet: false, Objects: s3Objects[0:numS3Objects]}) - if err != nil { - return nil - } - - if numS3Objects < len(listResponse.Contents) { - return nil - } - - listResponse, err = d.Bucket.List(d.s3Path(path), "", "", listMax) - if err != nil { - return err - } - } - - return nil -} - -// URLFor returns a URL which may be used to retrieve the content stored at the given path. -// May return an UnsupportedMethodErr in certain StorageDriver implementations. 
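The options map is the only knob URLFor exposes; the implementation below honors exactly two keys, "method" (GET or HEAD) and "expiry" (a time.Time), and defaults to a 20-minute GET. A sketch of a caller, with an illustrative helper name:

```
package example

import (
	"context"
	"time"

	storagedriver "github.com/docker/distribution/registry/storage/driver"
)

// signedGetURL requests a pre-signed GET URL valid for 15 minutes.
// Drivers that cannot sign URLs return an ErrUnsupportedMethod error.
func signedGetURL(ctx context.Context, d storagedriver.StorageDriver, path string) (string, error) {
	return d.URLFor(ctx, path, map[string]interface{}{
		"method": "GET",
		"expiry": time.Now().Add(15 * time.Minute),
	})
}
```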
-func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { - methodString := "GET" - method, ok := options["method"] - if ok { - methodString, ok = method.(string) - if !ok || (methodString != "GET" && methodString != "HEAD") { - return "", storagedriver.ErrUnsupportedMethod{} - } - } - - expiresTime := time.Now().Add(20 * time.Minute) - expires, ok := options["expiry"] - if ok { - et, ok := expires.(time.Time) - if ok { - expiresTime = et - } - } - - return d.Bucket.SignedURLWithMethod(methodString, d.s3Path(path), expiresTime, nil, nil), nil -} - -func (d *driver) s3Path(path string) string { - return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/") -} - -// S3BucketKey returns the s3 bucket key for the given storage driver path. -func (d *Driver) S3BucketKey(path string) string { - return d.StorageDriver.(*driver).s3Path(path) -} - -// Walk traverses a filesystem defined within driver, starting -// from the given path, calling f on each file -func (d *driver) Walk(ctx context.Context, path string, f storagedriver.WalkFn) error { - return storagedriver.WalkFallback(ctx, d, path, f) -} - -func parseError(path string, err error) error { - if s3Err, ok := err.(*s3.Error); ok && s3Err.Code == "NoSuchKey" { - return storagedriver.PathNotFoundError{Path: path} - } - - return err -} - -func (d *driver) getOptions() s3.Options { - return s3.Options{ - SSE: d.Encrypt, - StorageClass: d.StorageClass, - } -} - -func getPermissions() s3.ACL { - return s3.Private -} - -// mustV4Auth checks whether must use v4 auth in specific region. -// Please see documentation at http://docs.aws.amazon.com/general/latest/gr/signature-version-2.html -func mustV4Auth(region string) bool { - switch region { - case "eu-central-1", "cn-north-1", "us-east-2", - "ca-central-1", "ap-south-1", "ap-northeast-2", "eu-west-2": - return true - } - return false -} - -func (d *driver) getContentType() string { - return "application/octet-stream" -} - -// writer attempts to upload parts to S3 in a buffered fashion where the last -// part is at least as large as the chunksize, so the multipart upload could be -// cleanly resumed in the future. This is violated if Close is called after less -// than a full chunk is written. 
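The contract described above is easier to see in isolation. A toy sketch of the readyPart/pendingPart scheme, with a tiny stand-in chunk size (the real driver enforces at least 5 MB) and with "uploading" reduced to recording part sizes:

```
package main

import "fmt"

const chunkSize = 8 // toy value; the real minimum is 5 << 20

type chunker struct {
	ready, pending []byte
	parts          []int // sizes of "uploaded" parts
}

func (c *chunker) write(p []byte) {
	for len(p) > 0 {
		// Fill the ready buffer first.
		if need := chunkSize - len(c.ready); need > 0 {
			n := minInt(need, len(p))
			c.ready = append(c.ready, p[:n]...)
			p = p[n:]
			continue
		}
		// Then fill the pending buffer; flush once it is full.
		n := minInt(chunkSize-len(c.pending), len(p))
		c.pending = append(c.pending, p[:n]...)
		p = p[n:]
		if len(c.pending) == chunkSize {
			c.flush()
		}
	}
}

// flush mirrors flushPart: a short pending buffer is folded into ready,
// ready is "uploaded", and pending becomes the next ready buffer.
func (c *chunker) flush() {
	if len(c.ready) == 0 && len(c.pending) == 0 {
		return
	}
	if len(c.pending) < chunkSize {
		c.ready = append(c.ready, c.pending...)
		c.pending = nil
	}
	c.parts = append(c.parts, len(c.ready))
	c.ready, c.pending = c.pending, nil
}

func minInt(a, b int) int {
	if a < b {
		return a
	}
	return b
}

func main() {
	c := &chunker{}
	c.write(make([]byte, 20))
	c.flush()            // Close/Commit always flush the remainder
	fmt.Println(c.parts) // [8 12]
}
```

Note how the final flush folds the short pending buffer into the last part, which is why a part written mid-stream is never smaller than the chunk size.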
-type writer struct { - driver *driver - key string - multi *s3.Multi - parts []s3.Part - size int64 - readyPart []byte - pendingPart []byte - closed bool - committed bool - cancelled bool -} - -func (d *driver) newWriter(key string, multi *s3.Multi, parts []s3.Part) storagedriver.FileWriter { - var size int64 - for _, part := range parts { - size += part.Size - } - return &writer{ - driver: d, - key: key, - multi: multi, - parts: parts, - size: size, - } -} - -func (w *writer) Write(p []byte) (int, error) { - if w.closed { - return 0, fmt.Errorf("already closed") - } else if w.committed { - return 0, fmt.Errorf("already committed") - } else if w.cancelled { - return 0, fmt.Errorf("already cancelled") - } - - // If the last written part is smaller than minChunkSize, we need to make a - // new multipart upload :sadface: - if len(w.parts) > 0 && int(w.parts[len(w.parts)-1].Size) < minChunkSize { - err := w.multi.Complete(w.parts) - if err != nil { - w.multi.Abort() - return 0, err - } - - multi, err := w.driver.Bucket.InitMulti(w.key, w.driver.getContentType(), getPermissions(), w.driver.getOptions()) - if err != nil { - return 0, err - } - w.multi = multi - - // If the entire written file is smaller than minChunkSize, we need to make - // a new part from scratch :double sad face: - if w.size < minChunkSize { - contents, err := w.driver.Bucket.Get(w.key) - if err != nil { - return 0, err - } - w.parts = nil - w.readyPart = contents - } else { - // Otherwise we can use the old file as the new first part - _, part, err := multi.PutPartCopy(1, s3.CopyOptions{}, w.driver.Bucket.Name+"/"+w.key) - if err != nil { - return 0, err - } - w.parts = []s3.Part{part} - } - } - - var n int - - for len(p) > 0 { - // If no parts are ready to write, fill up the first part - if neededBytes := int(w.driver.ChunkSize) - len(w.readyPart); neededBytes > 0 { - if len(p) >= neededBytes { - w.readyPart = append(w.readyPart, p[:neededBytes]...) - n += neededBytes - p = p[neededBytes:] - } else { - w.readyPart = append(w.readyPart, p...) - n += len(p) - p = nil - } - } - - if neededBytes := int(w.driver.ChunkSize) - len(w.pendingPart); neededBytes > 0 { - if len(p) >= neededBytes { - w.pendingPart = append(w.pendingPart, p[:neededBytes]...) - n += neededBytes - p = p[neededBytes:] - err := w.flushPart() - if err != nil { - w.size += int64(n) - return n, err - } - } else { - w.pendingPart = append(w.pendingPart, p...) - n += len(p) - p = nil - } - } - } - w.size += int64(n) - return n, nil -} - -func (w *writer) Size() int64 { - return w.size -} - -func (w *writer) Close() error { - if w.closed { - return fmt.Errorf("already closed") - } - w.closed = true - return w.flushPart() -} - -func (w *writer) Cancel() error { - if w.closed { - return fmt.Errorf("already closed") - } else if w.committed { - return fmt.Errorf("already committed") - } - w.cancelled = true - err := w.multi.Abort() - return err -} - -func (w *writer) Commit() error { - if w.closed { - return fmt.Errorf("already closed") - } else if w.committed { - return fmt.Errorf("already committed") - } else if w.cancelled { - return fmt.Errorf("already cancelled") - } - err := w.flushPart() - if err != nil { - return err - } - w.committed = true - err = w.multi.Complete(w.parts) - if err != nil { - w.multi.Abort() - return err - } - return nil -} - -// flushPart flushes buffers to write a part to S3. 
-// Only called by Write (with both buffers full) and Close/Commit (always) -func (w *writer) flushPart() error { - if len(w.readyPart) == 0 && len(w.pendingPart) == 0 { - // nothing to write - return nil - } - if len(w.pendingPart) < int(w.driver.ChunkSize) { - // closing with a small pending part - // combine ready and pending to avoid writing a small part - w.readyPart = append(w.readyPart, w.pendingPart...) - w.pendingPart = nil - } - - part, err := w.multi.PutPart(len(w.parts)+1, bytes.NewReader(w.readyPart)) - if err != nil { - return err - } - w.parts = append(w.parts, part) - w.readyPart = w.pendingPart - w.pendingPart = nil - return nil -} diff --git a/registry/storage/driver/s3-goamz/s3_test.go b/registry/storage/driver/s3-goamz/s3_test.go deleted file mode 100644 index 34bfb5d0..00000000 --- a/registry/storage/driver/s3-goamz/s3_test.go +++ /dev/null @@ -1,201 +0,0 @@ -package s3 - -import ( - "io/ioutil" - "os" - "strconv" - "testing" - - "github.com/docker/distribution/context" - storagedriver "github.com/docker/distribution/registry/storage/driver" - "github.com/docker/distribution/registry/storage/driver/testsuites" - "github.com/docker/goamz/aws" - "github.com/docker/goamz/s3" - - "gopkg.in/check.v1" -) - -// Hook up gocheck into the "go test" runner. -func Test(t *testing.T) { check.TestingT(t) } - -var s3DriverConstructor func(rootDirectory string, storageClass s3.StorageClass) (*Driver, error) -var skipS3 func() string - -func init() { - accessKey := os.Getenv("AWS_ACCESS_KEY") - secretKey := os.Getenv("AWS_SECRET_KEY") - bucket := os.Getenv("S3_BUCKET") - encrypt := os.Getenv("S3_ENCRYPT") - secure := os.Getenv("S3_SECURE") - v4auth := os.Getenv("S3_USE_V4_AUTH") - region := os.Getenv("AWS_REGION") - root, err := ioutil.TempDir("", "driver-") - if err != nil { - panic(err) - } - defer os.Remove(root) - - s3DriverConstructor = func(rootDirectory string, storageClass s3.StorageClass) (*Driver, error) { - encryptBool := false - if encrypt != "" { - encryptBool, err = strconv.ParseBool(encrypt) - if err != nil { - return nil, err - } - } - - secureBool := true - if secure != "" { - secureBool, err = strconv.ParseBool(secure) - if err != nil { - return nil, err - } - } - - v4AuthBool := false - if v4auth != "" { - v4AuthBool, err = strconv.ParseBool(v4auth) - if err != nil { - return nil, err - } - } - - parameters := DriverParameters{ - accessKey, - secretKey, - bucket, - aws.GetRegion(region), - encryptBool, - secureBool, - v4AuthBool, - minChunkSize, - rootDirectory, - storageClass, - driverName + "-test", - } - - return New(parameters) - } - - // Skip S3 storage driver tests if environment variable parameters are not provided - skipS3 = func() string { - if accessKey == "" || secretKey == "" || region == "" || bucket == "" || encrypt == "" { - return "Must set AWS_ACCESS_KEY, AWS_SECRET_KEY, AWS_REGION, S3_BUCKET, and S3_ENCRYPT to run S3 tests" - } - return "" - } - - testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { - return s3DriverConstructor(root, s3.StandardStorage) - }, skipS3) -} - -func TestEmptyRootList(t *testing.T) { - if skipS3() != "" { - t.Skip(skipS3()) - } - - validRoot, err := ioutil.TempDir("", "driver-") - if err != nil { - t.Fatalf("unexpected error creating temporary directory: %v", err) - } - defer os.Remove(validRoot) - - rootedDriver, err := s3DriverConstructor(validRoot, s3.StandardStorage) - if err != nil { - t.Fatalf("unexpected error creating rooted driver: %v", err) - } - - emptyRootDriver, err := s3DriverConstructor("", 
s3.StandardStorage) - if err != nil { - t.Fatalf("unexpected error creating empty root driver: %v", err) - } - - slashRootDriver, err := s3DriverConstructor("/", s3.StandardStorage) - if err != nil { - t.Fatalf("unexpected error creating slash root driver: %v", err) - } - - filename := "/test" - contents := []byte("contents") - ctx := context.Background() - err = rootedDriver.PutContent(ctx, filename, contents) - if err != nil { - t.Fatalf("unexpected error creating content: %v", err) - } - defer rootedDriver.Delete(ctx, filename) - - keys, _ := emptyRootDriver.List(ctx, "/") - for _, path := range keys { - if !storagedriver.PathRegexp.MatchString(path) { - t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) - } - } - - keys, _ = slashRootDriver.List(ctx, "/") - for _, path := range keys { - if !storagedriver.PathRegexp.MatchString(path) { - t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) - } - } -} - -func TestStorageClass(t *testing.T) { - if skipS3() != "" { - t.Skip(skipS3()) - } - - rootDir, err := ioutil.TempDir("", "driver-") - if err != nil { - t.Fatalf("unexpected error creating temporary directory: %v", err) - } - defer os.Remove(rootDir) - - standardDriver, err := s3DriverConstructor(rootDir, s3.StandardStorage) - if err != nil { - t.Fatalf("unexpected error creating driver with standard storage: %v", err) - } - - rrDriver, err := s3DriverConstructor(rootDir, s3.ReducedRedundancy) - if err != nil { - t.Fatalf("unexpected error creating driver with reduced redundancy storage: %v", err) - } - - standardFilename := "/test-standard" - rrFilename := "/test-rr" - contents := []byte("contents") - ctx := context.Background() - - err = standardDriver.PutContent(ctx, standardFilename, contents) - if err != nil { - t.Fatalf("unexpected error creating content: %v", err) - } - defer standardDriver.Delete(ctx, standardFilename) - - err = rrDriver.PutContent(ctx, rrFilename, contents) - if err != nil { - t.Fatalf("unexpected error creating content: %v", err) - } - defer rrDriver.Delete(ctx, rrFilename) - - standardDriverUnwrapped := standardDriver.Base.StorageDriver.(*driver) - resp, err := standardDriverUnwrapped.Bucket.GetResponse(standardDriverUnwrapped.s3Path(standardFilename)) - if err != nil { - t.Fatalf("unexpected error retrieving standard storage file: %v", err) - } - defer resp.Body.Close() - // Amazon only populates this header value for non-standard storage classes - if storageClass := resp.Header.Get("x-amz-storage-class"); storageClass != "" { - t.Fatalf("unexpected storage class for standard file: %v", storageClass) - } - - rrDriverUnwrapped := rrDriver.Base.StorageDriver.(*driver) - resp, err = rrDriverUnwrapped.Bucket.GetResponse(rrDriverUnwrapped.s3Path(rrFilename)) - if err != nil { - t.Fatalf("unexpected error retrieving reduced-redundancy storage file: %v", err) - } - defer resp.Body.Close() - if storageClass := resp.Header.Get("x-amz-storage-class"); storageClass != string(s3.ReducedRedundancy) { - t.Fatalf("unexpected storage class for reduced-redundancy file: %v", storageClass) - } -} diff --git a/vendor.conf b/vendor.conf index df06259e..d0efe39d 100644 --- a/vendor.conf +++ b/vendor.conf @@ -10,7 +10,6 @@ github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782 github.com/denverdino/aliyungo afedced274aa9a7fcdd47ac97018f0f8db4e5de2 github.com/dgrijalva/jwt-go a601269ab70c205d26370c16f7c81e9017c14e04 github.com/docker/go-metrics 399ea8c73916000c64c2c76e8da00ca82f8387ab -github.com/docker/goamz 
f0a21f5b2e12f83a505ecf79b633bb2035cf6f85 github.com/docker/libtrust fa567046d9b14f6aa788882a950d69651d230b21 github.com/garyburd/redigo 535138d7bcd717d6531c701ef5933d98b1866257 github.com/go-ini/ini 2ba15ac2dc9cdf88c110ec2dc0ced7fa45f5678c diff --git a/vendor/github.com/docker/goamz/LICENSE b/vendor/github.com/docker/goamz/LICENSE deleted file mode 100644 index 53320c35..00000000 --- a/vendor/github.com/docker/goamz/LICENSE +++ /dev/null @@ -1,185 +0,0 @@ -This software is licensed under the LGPLv3, included below. - -As a special exception to the GNU Lesser General Public License version 3 -("LGPL3"), the copyright holders of this Library give you permission to -convey to a third party a Combined Work that links statically or dynamically -to this Library without providing any Minimal Corresponding Source or -Minimal Application Code as set out in 4d or providing the installation -information set out in section 4e, provided that you comply with the other -provisions of LGPL3 and provided that you meet, for the Application the -terms and conditions of the license(s) which apply to the Application. - -Except as stated in this special exception, the provisions of LGPL3 will -continue to comply in full to this Library. If you modify this Library, you -may apply this exception to your version of this Library, but you are not -obliged to do so. If you do not wish to do so, delete this exception -statement from your version. This exception does not (and cannot) modify any -license terms which apply to the Application, with which you must still -comply. - - - GNU LESSER GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - - This version of the GNU Lesser General Public License incorporates -the terms and conditions of version 3 of the GNU General Public -License, supplemented by the additional permissions listed below. - - 0. Additional Definitions. - - As used herein, "this License" refers to version 3 of the GNU Lesser -General Public License, and the "GNU GPL" refers to version 3 of the GNU -General Public License. - - "The Library" refers to a covered work governed by this License, -other than an Application or a Combined Work as defined below. - - An "Application" is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. -Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. - - A "Combined Work" is a work produced by combining or linking an -Application with the Library. The particular version of the Library -with which the Combined Work was made is also called the "Linked -Version". - - The "Minimal Corresponding Source" for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. - - The "Corresponding Application Code" for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. - - 1. Exception to Section 3 of the GNU GPL. - - You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. 
- - 2. Conveying Modified Versions. - - If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: - - a) under this License, provided that you make a good faith effort to - ensure that, in the event an Application does not supply the - function or data, the facility still operates, and performs - whatever part of its purpose remains meaningful, or - - b) under the GNU GPL, with none of the additional permissions of - this License applicable to that copy. - - 3. Object Code Incorporating Material from Library Header Files. - - The object code form of an Application may incorporate material from -a header file that is part of the Library. You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: - - a) Give prominent notice with each copy of the object code that the - Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the object code with a copy of the GNU GPL and this license - document. - - 4. Combined Works. - - You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: - - a) Give prominent notice with each copy of the Combined Work that - the Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the Combined Work with a copy of the GNU GPL and this license - document. - - c) For a Combined Work that displays copyright notices during - execution, include the copyright notice for the Library among - these notices, as well as a reference directing the user to the - copies of the GNU GPL and this license document. - - d) Do one of the following: - - 0) Convey the Minimal Corresponding Source under the terms of this - License, and the Corresponding Application Code in a form - suitable for, and under terms that permit, the user to - recombine or relink the Application with a modified version of - the Linked Version to produce a modified Combined Work, in the - manner specified by section 6 of the GNU GPL for conveying - Corresponding Source. - - 1) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (a) uses at run time - a copy of the Library already present on the user's computer - system, and (b) will operate properly with a modified version - of the Library that is interface-compatible with the Linked - Version. - - e) Provide Installation Information, but only if you would otherwise - be required to provide such information under section 6 of the - GNU GPL, and only to the extent that such information is - necessary to install and execute a modified version of the - Combined Work produced by recombining or relinking the - Application with a modified version of the Linked Version. (If - you use option 4d0, the Installation Information must accompany - the Minimal Corresponding Source and Corresponding Application - Code. 
If you use option 4d1, you must provide the Installation - Information in the manner specified by section 6 of the GNU GPL - for conveying Corresponding Source.) - - 5. Combined Libraries. - - You may place library facilities that are a work based on the -Library side by side in a single library together with other library -facilities that are not Applications and are not covered by this -License, and convey such a combined library under terms of your -choice, if you do both of the following: - - a) Accompany the combined library with a copy of the same work based - on the Library, uncombined with any other library facilities, - conveyed under the terms of this License. - - b) Give prominent notice with the combined library that part of it - is a work based on the Library, and explaining where to find the - accompanying uncombined form of the same work. - - 6. Revised Versions of the GNU Lesser General Public License. - - The Free Software Foundation may publish revised and/or new versions -of the GNU Lesser General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. - - Each version is given a distinguishing version number. If the -Library as you received it specifies that a certain numbered version -of the GNU Lesser General Public License "or any later version" -applies to it, you have the option of following the terms and -conditions either of that published version or of any later version -published by the Free Software Foundation. If the Library as you -received it does not specify a version number of the GNU Lesser -General Public License, you may choose any version of the GNU Lesser -General Public License ever published by the Free Software Foundation. - - If the Library as you received it specifies that a proxy can decide -whether future versions of the GNU Lesser General Public License shall -apply, that proxy's public statement of acceptance of any version is -permanent authorization for you to choose that version for the -Library. diff --git a/vendor/github.com/docker/goamz/README.md b/vendor/github.com/docker/goamz/README.md deleted file mode 100644 index 9d03b466..00000000 --- a/vendor/github.com/docker/goamz/README.md +++ /dev/null @@ -1,68 +0,0 @@ -# News -We are putting together plans for future changes. We obviously depend on all of you to take part in the planning for the future of goamz and execution of the plans. Other than the regulare 'issues' and 'pull requests' please also have a look at TODO.md. - -It is inevitable that there will be backward-*in*compatible changes. Please subscribe to the google group to get all the news (it will only be used for announcements, all the technical discussions will happen on github). - -Google group: https://groups.google.com/forum/#!forum/goamz-announcements - - - -# GoAMZ - -[![Build Status](https://travis-ci.org/docker/goamz.png?branch=master)](https://travis-ci.org/docker/goamz) - -The _goamz_ package enables Go programs to interact with Amazon Web Services. - -This is a fork of the version [developed within Canonical](https://wiki.ubuntu.com/goamz) with additional functionality and services from [a number of contributors](https://github.com/docker/goamz/contributors)! - -The API of AWS is very comprehensive, though, and goamz doesn't even scratch the surface of it. That said, it's fairly well tested, and is the foundation in which further calls can easily be integrated. 
We'll continue extending the API as necessary - Pull Requests are _very_ welcome! - -The following packages are available at the moment: - -``` -github.com/docker/goamz/aws -github.com/docker/goamz/cloudwatch -github.com/docker/goamz/dynamodb -github.com/docker/goamz/ec2 -github.com/docker/goamz/elb -github.com/docker/goamz/iam -github.com/docker/goamz/kinesis -github.com/docker/goamz/s3 -github.com/docker/goamz/sqs -github.com/docker/goamz/sns - -github.com/docker/goamz/exp/mturk -github.com/docker/goamz/exp/sdb -github.com/docker/goamz/exp/ses -``` - -Packages under `exp/` are still in an experimental or unfinished/unpolished state. - -## API documentation - -The API documentation is currently available at: - -[http://godoc.org/github.com/docker/goamz](http://godoc.org/github.com/docker/goamz) - -## How to build and install goamz - -Just use `go get` with any of the available packages. For example: - -* `$ go get github.com/docker/goamz/ec2` -* `$ go get github.com/docker/goamz/s3` - -## Running tests - -To run tests, first install gocheck with: - -`$ go get launchpad.net/gocheck` - -Then run go test as usual: - -`$ go test github.com/docker/goamz/...` - -_Note:_ running all tests with the command `go test ./...` will currently fail as tests do not tear down their HTTP listeners. - -If you want to run integration tests (costs money), set up the EC2 environment variables as usual, and run: - -`$ gotest -i` diff --git a/vendor/github.com/docker/goamz/aws/attempt.go b/vendor/github.com/docker/goamz/aws/attempt.go deleted file mode 100644 index c0654f5d..00000000 --- a/vendor/github.com/docker/goamz/aws/attempt.go +++ /dev/null @@ -1,74 +0,0 @@ -package aws - -import ( - "time" -) - -// AttemptStrategy represents a strategy for waiting for an action -// to complete successfully. This is an internal type used by the -// implementation of other goamz packages. -type AttemptStrategy struct { - Total time.Duration // total duration of attempt. - Delay time.Duration // interval between each try in the burst. - Min int // minimum number of retries; overrides Total -} - -type Attempt struct { - strategy AttemptStrategy - last time.Time - end time.Time - force bool - count int -} - -// Start begins a new sequence of attempts for the given strategy. -func (s AttemptStrategy) Start() *Attempt { - now := time.Now() - return &Attempt{ - strategy: s, - last: now, - end: now.Add(s.Total), - force: true, - } -} - -// Next waits until it is time to perform the next attempt or returns -// false if it is time to stop trying. -func (a *Attempt) Next() bool { - now := time.Now() - sleep := a.nextSleep(now) - if !a.force && !now.Add(sleep).Before(a.end) && a.strategy.Min <= a.count { - return false - } - a.force = false - if sleep > 0 && a.count > 0 { - time.Sleep(sleep) - now = time.Now() - } - a.count++ - a.last = now - return true -} - -func (a *Attempt) nextSleep(now time.Time) time.Duration { - sleep := a.strategy.Delay - now.Sub(a.last) - if sleep < 0 { - return 0 - } - return sleep -} - -// HasNext returns whether another attempt will be made if the current -// one fails. If it returns true, the following call to Next is -// guaranteed to return true. 
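For readers of this now-removed code, the Start/Next pair above is designed to drive a plain for loop. A sketch of the intended usage; operation is a hypothetical stand-in for a transiently failing call, and the import refers to the vendored package this patch deletes:

```
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/docker/goamz/aws" // removed by this patch; shown for illustration
)

var calls int

// operation fails twice before succeeding.
func operation() error {
	calls++
	if calls < 3 {
		return errors.New("transient failure")
	}
	return nil
}

func main() {
	strat := aws.AttemptStrategy{
		Total: 5 * time.Second,        // overall retry budget
		Delay: 200 * time.Millisecond, // pause between tries
	}
	var err error
	for attempt := strat.Start(); attempt.Next(); {
		if err = operation(); err == nil {
			break
		}
	}
	fmt.Println(calls, err) // 3 <nil>
}
```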
-func (a *Attempt) HasNext() bool { - if a.force || a.strategy.Min > a.count { - return true - } - now := time.Now() - if now.Add(a.nextSleep(now)).Before(a.end) { - a.force = true - return true - } - return false -} diff --git a/vendor/github.com/docker/goamz/aws/aws.go b/vendor/github.com/docker/goamz/aws/aws.go deleted file mode 100644 index ac8340ad..00000000 --- a/vendor/github.com/docker/goamz/aws/aws.go +++ /dev/null @@ -1,636 +0,0 @@ -// -// goamz - Go packages to interact with the Amazon Web Services. -// -// https://wiki.ubuntu.com/goamz -// -// Copyright (c) 2011 Canonical Ltd. -// -// Written by Gustavo Niemeyer -// -package aws - -import ( - "encoding/json" - "encoding/xml" - "errors" - "fmt" - "io/ioutil" - "net" - "net/http" - "net/url" - "os" - "os/user" - "path" - "regexp" - "strings" - "time" -) - -// Regular expressions for INI files -var ( - iniSectionRegexp = regexp.MustCompile(`^\s*\[([^\[\]]+)\]\s*$`) - iniSettingRegexp = regexp.MustCompile(`^\s*(.+?)\s*=\s*(.*\S)\s*$`) -) - -// Defines the valid signers -const ( - V2Signature = iota - V4Signature = iota - Route53Signature = iota -) - -// Defines the service endpoint and correct Signer implementation to use -// to sign requests for this endpoint -type ServiceInfo struct { - Endpoint string - Signer uint -} - -// Region defines the URLs where AWS services may be accessed. -// -// See http://goo.gl/d8BP1 for more details. -type Region struct { - Name string // the canonical name of this region. - EC2Endpoint ServiceInfo - S3Endpoint string - S3BucketEndpoint string // Not needed by AWS S3. Use ${bucket} for bucket name. - S3LocationConstraint bool // true if this region requires a LocationConstraint declaration. - S3LowercaseBucket bool // true if the region requires bucket names to be lower case. - SDBEndpoint string - SNSEndpoint string - SQSEndpoint string - SESEndpoint string - IAMEndpoint string - ELBEndpoint string - KMSEndpoint string - DynamoDBEndpoint string - CloudWatchServicepoint ServiceInfo - AutoScalingEndpoint string - RDSEndpoint ServiceInfo - KinesisEndpoint string - STSEndpoint string - CloudFormationEndpoint string - ElastiCacheEndpoint string -} - -var Regions = map[string]Region{ - APNortheast.Name: APNortheast, - APNortheast2.Name: APNortheast2, - APSoutheast.Name: APSoutheast, - APSoutheast2.Name: APSoutheast2, - EUCentral.Name: EUCentral, - EUWest.Name: EUWest, - USEast.Name: USEast, - USWest.Name: USWest, - USWest2.Name: USWest2, - USGovWest.Name: USGovWest, - SAEast.Name: SAEast, - CNNorth1.Name: CNNorth1, -} - -// Designates a signer interface suitable for signing AWS requests, params -// should be appropriately encoded for the request before signing. -// -// A signer should be initialized with Auth and the appropriate endpoint. -type Signer interface { - Sign(method, path string, params map[string]string) -} - -// An AWS Service interface with the API to query the AWS service -// -// Supplied as an easy way to mock out service calls during testing. -type AWSService interface { - // Queries the AWS service at a given method/path with the params and - // returns an http.Response and error - Query(method, path string, params map[string]string) (*http.Response, error) - // Builds an error given an XML payload in the http.Response, can be used - // to process an error if the status code is not 200 for example. 
- BuildError(r *http.Response) error -} - -// Implements a Server Query/Post API to easily query AWS services and build -// errors when desired -type Service struct { - service ServiceInfo - signer Signer -} - -// Create a base set of params for an action -func MakeParams(action string) map[string]string { - params := make(map[string]string) - params["Action"] = action - return params -} - -// Create a new AWS server to handle making requests -func NewService(auth Auth, service ServiceInfo) (s *Service, err error) { - var signer Signer - switch service.Signer { - case V2Signature: - signer, err = NewV2Signer(auth, service) - // case V4Signature: - // signer, err = NewV4Signer(auth, service, Regions["eu-west-1"]) - default: - err = fmt.Errorf("Unsupported signer for service") - } - if err != nil { - return - } - s = &Service{service: service, signer: signer} - return -} - -func (s *Service) Query(method, path string, params map[string]string) (resp *http.Response, err error) { - params["Timestamp"] = time.Now().UTC().Format(time.RFC3339) - u, err := url.Parse(s.service.Endpoint) - if err != nil { - return nil, err - } - u.Path = path - - s.signer.Sign(method, path, params) - if method == "GET" { - u.RawQuery = multimap(params).Encode() - resp, err = http.Get(u.String()) - } else if method == "POST" { - resp, err = http.PostForm(u.String(), multimap(params)) - } - - return -} - -func (s *Service) BuildError(r *http.Response) error { - errors := ErrorResponse{} - xml.NewDecoder(r.Body).Decode(&errors) - var err Error - err = errors.Errors - err.RequestId = errors.RequestId - err.StatusCode = r.StatusCode - if err.Message == "" { - err.Message = r.Status - } - return &err -} - -type ServiceError interface { - error - ErrorCode() string -} - -type ErrorResponse struct { - Errors Error `xml:"Error"` - RequestId string // A unique ID for tracking the request -} - -type Error struct { - StatusCode int - Type string - Code string - Message string - RequestId string -} - -func (err *Error) Error() string { - return fmt.Sprintf("Type: %s, Code: %s, Message: %s", - err.Type, err.Code, err.Message, - ) -} - -func (err *Error) ErrorCode() string { - return err.Code -} - -type Auth struct { - AccessKey, SecretKey string - token string - expiration time.Time -} - -func (a *Auth) Token() string { - if a.token == "" { - return "" - } - if time.Since(a.expiration) >= -30*time.Second { //in an ideal world this should be zero assuming the instance is synching it's clock - auth, err := GetAuth("", "", "", time.Time{}) - if err == nil { - *a = auth - } - } - return a.token -} - -func (a *Auth) Expiration() time.Time { - return a.expiration -} - -// To be used with other APIs that return auth credentials such as STS -func NewAuth(accessKey, secretKey, token string, expiration time.Time) *Auth { - return &Auth{ - AccessKey: accessKey, - SecretKey: secretKey, - token: token, - expiration: expiration, - } -} - -// ResponseMetadata -type ResponseMetadata struct { - RequestId string // A unique ID for tracking the request -} - -type BaseResponse struct { - ResponseMetadata ResponseMetadata -} - -var unreserved = make([]bool, 128) -var hex = "0123456789ABCDEF" - -func init() { - // RFC3986 - u := "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567890-_.~" - for _, c := range u { - unreserved[c] = true - } -} - -func multimap(p map[string]string) url.Values { - q := make(url.Values, len(p)) - for k, v := range p { - q[k] = []string{v} - } - return q -} - -type credentials struct { - Code string - LastUpdated 
string - Type string - AccessKeyId string - SecretAccessKey string - Token string - Expiration string -} - -// GetMetaData retrieves instance metadata about the current machine. -// -// See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html for more details. -func GetMetaData(path string) (contents []byte, err error) { - c := http.Client{ - Transport: &http.Transport{ - Dial: func(netw, addr string) (net.Conn, error) { - deadline := time.Now().Add(5 * time.Second) - c, err := net.DialTimeout(netw, addr, time.Second*2) - if err != nil { - return nil, err - } - c.SetDeadline(deadline) - return c, nil - }, - }, - } - - url := "http://169.254.169.254/latest/meta-data/" + path - - resp, err := c.Get(url) - if err != nil { - return - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - err = fmt.Errorf("Code %d returned for url %s", resp.StatusCode, url) - return - } - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return - } - return []byte(body), err -} - -func GetRegion(regionName string) (region Region) { - region = Regions[regionName] - return -} - -// GetInstanceCredentials creates an Auth based on the instance's role credentials. -// If the running instance is not in EC2 or does not have a valid IAM role, an error will be returned. -// For more info about setting up IAM roles, see http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html -func GetInstanceCredentials() (cred credentials, err error) { - credentialPath := "iam/security-credentials/" - - // Get the instance role - role, err := GetMetaData(credentialPath) - if err != nil { - return - } - - // Get the instance role credentials - credentialJSON, err := GetMetaData(credentialPath + string(role)) - if err != nil { - return - } - - err = json.Unmarshal([]byte(credentialJSON), &cred) - return -} - -// GetAuth creates an Auth based on either passed in credentials, -// environment information or instance based role credentials. -func GetAuth(accessKey string, secretKey, token string, expiration time.Time) (auth Auth, err error) { - // First try passed in credentials - if accessKey != "" && secretKey != "" { - return Auth{accessKey, secretKey, token, expiration}, nil - } - - // Next try to get auth from the environment - auth, err = EnvAuth() - if err == nil { - // Found auth, return - return - } - - // Next try getting auth from the instance role - cred, err := GetInstanceCredentials() - if err == nil { - // Found auth, return - auth.AccessKey = cred.AccessKeyId - auth.SecretKey = cred.SecretAccessKey - auth.token = cred.Token - exptdate, err := time.Parse("2006-01-02T15:04:05Z", cred.Expiration) - if err != nil { - err = fmt.Errorf("Error Parsing expiration date: cred.Expiration :%s , error: %s \n", cred.Expiration, err) - } - auth.expiration = exptdate - return auth, err - } - - // Next try getting auth from the credentials file - auth, err = CredentialFileAuth("", "", time.Minute*5) - if err == nil { - return - } - - //err = errors.New("No valid AWS authentication found") - err = fmt.Errorf("No valid AWS authentication found: %s", err) - return auth, err -} - -// EnvAuth creates an Auth based on environment information. -// The AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment -// variables are used. 
-func EnvAuth() (auth Auth, err error) { - auth.AccessKey = os.Getenv("AWS_ACCESS_KEY_ID") - if auth.AccessKey == "" { - auth.AccessKey = os.Getenv("AWS_ACCESS_KEY") - } - - auth.SecretKey = os.Getenv("AWS_SECRET_ACCESS_KEY") - if auth.SecretKey == "" { - auth.SecretKey = os.Getenv("AWS_SECRET_KEY") - } - if auth.AccessKey == "" { - err = errors.New("AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment") - } - if auth.SecretKey == "" { - err = errors.New("AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment") - } - return -} - -// CredentialFileAuth creates and Auth based on a credentials file. The file -// contains various authentication profiles for use with AWS. -// -// The credentials file, which is used by other AWS SDKs, is documented at -// http://blogs.aws.amazon.com/security/post/Tx3D6U6WSFGOK2H/A-New-and-Standardized-Way-to-Manage-Credentials-in-the-AWS-SDKs -func CredentialFileAuth(filePath string, profile string, expiration time.Duration) (auth Auth, err error) { - if profile == "" { - profile = os.Getenv("AWS_DEFAULT_PROFILE") - if profile == "" { - profile = os.Getenv("AWS_PROFILE") - if profile == "" { - profile = "default" - } - } - } - - if filePath == "" { - u, err := user.Current() - if err != nil { - return auth, err - } - - filePath = path.Join(u.HomeDir, ".aws", "credentials") - } - - // read the file, then parse the INI - contents, err := ioutil.ReadFile(filePath) - if err != nil { - return - } - - profiles := parseINI(string(contents)) - profileData, ok := profiles[profile] - - if !ok { - err = errors.New("The credentials file did not contain the profile") - return - } - - keyId, ok := profileData["aws_access_key_id"] - if !ok { - err = errors.New("The credentials file did not contain required attribute aws_access_key_id") - return - } - - secretKey, ok := profileData["aws_secret_access_key"] - if !ok { - err = errors.New("The credentials file did not contain required attribute aws_secret_access_key") - return - } - - auth.AccessKey = keyId - auth.SecretKey = secretKey - - if token, ok := profileData["aws_session_token"]; ok { - auth.token = token - } - - auth.expiration = time.Now().Add(expiration) - - return -} - -// parseINI takes the contents of a credentials file and returns a map, whose keys -// are the various profiles, and whose values are maps of the settings for the -// profiles -func parseINI(fileContents string) map[string]map[string]string { - profiles := make(map[string]map[string]string) - - lines := strings.Split(fileContents, "\n") - - var currentSection map[string]string - for _, line := range lines { - // remove comments, which start with a semi-colon - if split := strings.Split(line, ";"); len(split) > 1 { - line = split[0] - } - - // check if the line is the start of a profile. - // - // for example: - // [default] - // - // otherwise, check for the proper setting - // property=value - if sectMatch := iniSectionRegexp.FindStringSubmatch(line); len(sectMatch) == 2 { - currentSection = make(map[string]string) - profiles[sectMatch[1]] = currentSection - } else if setMatch := iniSettingRegexp.FindStringSubmatch(line); len(setMatch) == 3 && currentSection != nil { - currentSection[setMatch[1]] = setMatch[2] - } - } - - return profiles -} - -// Encode takes a string and URI-encodes it in a way suitable -// to be used in AWS signatures. 
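Concretely, Encode escapes every byte outside the RFC 3986 unreserved set built in init above, including '/', which ordinary URL path escaping leaves alone; AWS request signing requires that strictness. Two examples, with outputs worked out from that table (the import refers to the vendored package this patch deletes):

```
package main

import (
	"fmt"

	"github.com/docker/goamz/aws" // removed by this patch; shown for illustration
)

func main() {
	// '/' and ' ' are outside the unreserved set, so both are escaped.
	fmt.Println(aws.Encode("photos/2017 01.jpg")) // photos%2F2017%2001.jpg
	// Unreserved characters pass through untouched.
	fmt.Println(aws.Encode("safe-chars_~.")) // safe-chars_~.
}
```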
-func Encode(s string) string { - encode := false - for i := 0; i != len(s); i++ { - c := s[i] - if c > 127 || !unreserved[c] { - encode = true - break - } - } - if !encode { - return s - } - e := make([]byte, len(s)*3) - ei := 0 - for i := 0; i != len(s); i++ { - c := s[i] - if c > 127 || !unreserved[c] { - e[ei] = '%' - e[ei+1] = hex[c>>4] - e[ei+2] = hex[c&0xF] - ei += 3 - } else { - e[ei] = c - ei += 1 - } - } - return string(e[:ei]) -} - -func dialTimeout(network, addr string) (net.Conn, error) { - return net.DialTimeout(network, addr, time.Duration(2*time.Second)) -} - -func AvailabilityZone() string { - transport := http.Transport{Dial: dialTimeout} - client := http.Client{ - Transport: &transport, - } - resp, err := client.Get("http://169.254.169.254/latest/meta-data/placement/availability-zone") - if err != nil { - return "unknown" - } else { - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "unknown" - } else { - return string(body) - } - } -} - -func InstanceRegion() string { - az := AvailabilityZone() - if az == "unknown" { - return az - } else { - region := az[:len(az)-1] - return region - } -} - -func InstanceId() string { - transport := http.Transport{Dial: dialTimeout} - client := http.Client{ - Transport: &transport, - } - resp, err := client.Get("http://169.254.169.254/latest/meta-data/instance-id") - if err != nil { - return "unknown" - } else { - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "unknown" - } else { - return string(body) - } - } -} - -func InstanceType() string { - transport := http.Transport{Dial: dialTimeout} - client := http.Client{ - Transport: &transport, - } - resp, err := client.Get("http://169.254.169.254/latest/meta-data/instance-type") - if err != nil { - return "unknown" - } else { - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "unknown" - } else { - return string(body) - } - } -} - -func ServerLocalIp() string { - transport := http.Transport{Dial: dialTimeout} - client := http.Client{ - Transport: &transport, - } - resp, err := client.Get("http://169.254.169.254/latest/meta-data/local-ipv4") - if err != nil { - return "127.0.0.1" - } else { - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "127.0.0.1" - } else { - return string(body) - } - } -} - -func ServerPublicIp() string { - transport := http.Transport{Dial: dialTimeout} - client := http.Client{ - Transport: &transport, - } - resp, err := client.Get("http://169.254.169.254/latest/meta-data/public-ipv4") - if err != nil { - return "127.0.0.1" - } else { - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "127.0.0.1" - } else { - return string(body) - } - } -} diff --git a/vendor/github.com/docker/goamz/aws/client.go b/vendor/github.com/docker/goamz/aws/client.go deleted file mode 100644 index 86d2ccec..00000000 --- a/vendor/github.com/docker/goamz/aws/client.go +++ /dev/null @@ -1,124 +0,0 @@ -package aws - -import ( - "math" - "net" - "net/http" - "time" -) - -type RetryableFunc func(*http.Request, *http.Response, error) bool -type WaitFunc func(try int) -type DeadlineFunc func() time.Time - -type ResilientTransport struct { - // Timeout is the maximum amount of time a dial will wait for - // a connect to complete. - // - // The default is no timeout. - // - // With or without a timeout, the operating system may impose - // its own earlier timeout. 
For instance, TCP timeouts are - // often around 3 minutes. - DialTimeout time.Duration - - // MaxTries, if non-zero, specifies the number of times we will retry on - // failure. Retries are only attempted for temporary network errors or known - // safe failures. - MaxTries int - Deadline DeadlineFunc - ShouldRetry RetryableFunc - Wait WaitFunc - transport *http.Transport -} - -// Convenience method for creating an http client -func NewClient(rt *ResilientTransport) *http.Client { - rt.transport = &http.Transport{ - Dial: func(netw, addr string) (net.Conn, error) { - c, err := net.DialTimeout(netw, addr, rt.DialTimeout) - if err != nil { - return nil, err - } - c.SetDeadline(rt.Deadline()) - return c, nil - }, - Proxy: http.ProxyFromEnvironment, - } - // TODO: Would be nice is ResilientTransport allowed clients to initialize - // with http.Transport attributes. - return &http.Client{ - Transport: rt, - } -} - -var retryingTransport = &ResilientTransport{ - Deadline: func() time.Time { - return time.Now().Add(5 * time.Second) - }, - DialTimeout: 10 * time.Second, - MaxTries: 3, - ShouldRetry: awsRetry, - Wait: ExpBackoff, -} - -// Exported default client -var RetryingClient = NewClient(retryingTransport) - -func (t *ResilientTransport) RoundTrip(req *http.Request) (*http.Response, error) { - return t.tries(req) -} - -// Retry a request a maximum of t.MaxTries times. -// We'll only retry if the proper criteria are met. -// If a wait function is specified, wait that amount of time -// In between requests. -func (t *ResilientTransport) tries(req *http.Request) (res *http.Response, err error) { - for try := 0; try < t.MaxTries; try += 1 { - res, err = t.transport.RoundTrip(req) - - if !t.ShouldRetry(req, res, err) { - break - } - if res != nil { - res.Body.Close() - } - if t.Wait != nil { - t.Wait(try) - } - } - - return -} - -func ExpBackoff(try int) { - time.Sleep(100 * time.Millisecond * - time.Duration(math.Exp2(float64(try)))) -} - -func LinearBackoff(try int) { - time.Sleep(time.Duration(try*100) * time.Millisecond) -} - -// Decide if we should retry a request. -// In general, the criteria for retrying a request is described here -// http://docs.aws.amazon.com/general/latest/gr/api-retries.html -func awsRetry(req *http.Request, res *http.Response, err error) bool { - retry := false - - // Retry if there's a temporary network error. - if neterr, ok := err.(net.Error); ok { - if neterr.Temporary() { - retry = true - } - } - - // Retry if we get a 5xx series error. 
- if res != nil { - if res.StatusCode >= 500 && res.StatusCode < 600 { - retry = true - } - } - - return retry -} diff --git a/vendor/github.com/docker/goamz/aws/regions.go b/vendor/github.com/docker/goamz/aws/regions.go deleted file mode 100644 index 0406648d..00000000 --- a/vendor/github.com/docker/goamz/aws/regions.go +++ /dev/null @@ -1,289 +0,0 @@ -package aws - -var USGovWest = Region{ - "us-gov-west-1", - ServiceInfo{"https://ec2.us-gov-west-1.amazonaws.com", V2Signature}, - "https://s3-fips-us-gov-west-1.amazonaws.com", - "", - true, - true, - "", - "https://sns.us-gov-west-1.amazonaws.com", - "https://sqs.us-gov-west-1.amazonaws.com", - "", - "https://iam.us-gov.amazonaws.com", - "https://elasticloadbalancing.us-gov-west-1.amazonaws.com", - "", - "https://dynamodb.us-gov-west-1.amazonaws.com", - ServiceInfo{"https://monitoring.us-gov-west-1.amazonaws.com", V2Signature}, - "https://autoscaling.us-gov-west-1.amazonaws.com", - ServiceInfo{"https://rds.us-gov-west-1.amazonaws.com", V2Signature}, - "", - "https://sts.amazonaws.com", - "https://cloudformation.us-gov-west-1.amazonaws.com", - "", -} - -var USEast = Region{ - "us-east-1", - ServiceInfo{"https://ec2.us-east-1.amazonaws.com", V2Signature}, - "https://s3-external-1.amazonaws.com", - "", - false, - false, - "https://sdb.amazonaws.com", - "https://sns.us-east-1.amazonaws.com", - "https://sqs.us-east-1.amazonaws.com", - "https://email.us-east-1.amazonaws.com", - "https://iam.amazonaws.com", - "https://elasticloadbalancing.us-east-1.amazonaws.com", - "https://kms.us-east-1.amazonaws.com", - "https://dynamodb.us-east-1.amazonaws.com", - ServiceInfo{"https://monitoring.us-east-1.amazonaws.com", V2Signature}, - "https://autoscaling.us-east-1.amazonaws.com", - ServiceInfo{"https://rds.us-east-1.amazonaws.com", V2Signature}, - "https://kinesis.us-east-1.amazonaws.com", - "https://sts.amazonaws.com", - "https://cloudformation.us-east-1.amazonaws.com", - "https://elasticache.us-east-1.amazonaws.com", -} - -var USWest = Region{ - "us-west-1", - ServiceInfo{"https://ec2.us-west-1.amazonaws.com", V2Signature}, - "https://s3-us-west-1.amazonaws.com", - "", - true, - true, - "https://sdb.us-west-1.amazonaws.com", - "https://sns.us-west-1.amazonaws.com", - "https://sqs.us-west-1.amazonaws.com", - "", - "https://iam.amazonaws.com", - "https://elasticloadbalancing.us-west-1.amazonaws.com", - "https://kms.us-west-1.amazonaws.com", - "https://dynamodb.us-west-1.amazonaws.com", - ServiceInfo{"https://monitoring.us-west-1.amazonaws.com", V2Signature}, - "https://autoscaling.us-west-1.amazonaws.com", - ServiceInfo{"https://rds.us-west-1.amazonaws.com", V2Signature}, - "https://kinesis.us-west-1.amazonaws.com", - "https://sts.amazonaws.com", - "https://cloudformation.us-west-1.amazonaws.com", - "https://elasticache.us-west-1.amazonaws.com", -} - -var USWest2 = Region{ - "us-west-2", - ServiceInfo{"https://ec2.us-west-2.amazonaws.com", V2Signature}, - "https://s3-us-west-2.amazonaws.com", - "", - true, - true, - "https://sdb.us-west-2.amazonaws.com", - "https://sns.us-west-2.amazonaws.com", - "https://sqs.us-west-2.amazonaws.com", - "https://email.us-west-2.amazonaws.com", - "https://iam.amazonaws.com", - "https://elasticloadbalancing.us-west-2.amazonaws.com", - "https://kms.us-west-2.amazonaws.com", - "https://dynamodb.us-west-2.amazonaws.com", - ServiceInfo{"https://monitoring.us-west-2.amazonaws.com", V2Signature}, - "https://autoscaling.us-west-2.amazonaws.com", - ServiceInfo{"https://rds.us-west-2.amazonaws.com", V2Signature}, - 
"https://kinesis.us-west-2.amazonaws.com", - "https://sts.amazonaws.com", - "https://cloudformation.us-west-2.amazonaws.com", - "https://elasticache.us-west-2.amazonaws.com", -} - -var EUWest = Region{ - "eu-west-1", - ServiceInfo{"https://ec2.eu-west-1.amazonaws.com", V2Signature}, - "https://s3-eu-west-1.amazonaws.com", - "", - true, - true, - "https://sdb.eu-west-1.amazonaws.com", - "https://sns.eu-west-1.amazonaws.com", - "https://sqs.eu-west-1.amazonaws.com", - "https://email.eu-west-1.amazonaws.com", - "https://iam.amazonaws.com", - "https://elasticloadbalancing.eu-west-1.amazonaws.com", - "https://kms.eu-west-1.amazonaws.com", - "https://dynamodb.eu-west-1.amazonaws.com", - ServiceInfo{"https://monitoring.eu-west-1.amazonaws.com", V2Signature}, - "https://autoscaling.eu-west-1.amazonaws.com", - ServiceInfo{"https://rds.eu-west-1.amazonaws.com", V2Signature}, - "https://kinesis.eu-west-1.amazonaws.com", - "https://sts.amazonaws.com", - "https://cloudformation.eu-west-1.amazonaws.com", - "https://elasticache.eu-west-1.amazonaws.com", -} - -var EUCentral = Region{ - "eu-central-1", - ServiceInfo{"https://ec2.eu-central-1.amazonaws.com", V4Signature}, - "https://s3-eu-central-1.amazonaws.com", - "", - true, - true, - "https://sdb.eu-central-1.amazonaws.com", - "https://sns.eu-central-1.amazonaws.com", - "https://sqs.eu-central-1.amazonaws.com", - "", - "https://iam.amazonaws.com", - "https://elasticloadbalancing.eu-central-1.amazonaws.com", - "https://kms.eu-central-1.amazonaws.com", - "https://dynamodb.eu-central-1.amazonaws.com", - ServiceInfo{"https://monitoring.eu-central-1.amazonaws.com", V2Signature}, - "https://autoscaling.eu-central-1.amazonaws.com", - ServiceInfo{"https://rds.eu-central-1.amazonaws.com", V2Signature}, - "https://kinesis.eu-central-1.amazonaws.com", - "https://sts.amazonaws.com", - "https://cloudformation.eu-central-1.amazonaws.com", - "", -} - -var APSoutheast = Region{ - "ap-southeast-1", - ServiceInfo{"https://ec2.ap-southeast-1.amazonaws.com", V2Signature}, - "https://s3-ap-southeast-1.amazonaws.com", - "", - true, - true, - "https://sdb.ap-southeast-1.amazonaws.com", - "https://sns.ap-southeast-1.amazonaws.com", - "https://sqs.ap-southeast-1.amazonaws.com", - "", - "https://iam.amazonaws.com", - "https://elasticloadbalancing.ap-southeast-1.amazonaws.com", - "https://kms.ap-southeast-1.amazonaws.com", - "https://dynamodb.ap-southeast-1.amazonaws.com", - ServiceInfo{"https://monitoring.ap-southeast-1.amazonaws.com", V2Signature}, - "https://autoscaling.ap-southeast-1.amazonaws.com", - ServiceInfo{"https://rds.ap-southeast-1.amazonaws.com", V2Signature}, - "https://kinesis.ap-southeast-1.amazonaws.com", - "https://sts.amazonaws.com", - "https://cloudformation.ap-southeast-1.amazonaws.com", - "https://elasticache.ap-southeast-1.amazonaws.com", -} - -var APSoutheast2 = Region{ - "ap-southeast-2", - ServiceInfo{"https://ec2.ap-southeast-2.amazonaws.com", V2Signature}, - "https://s3-ap-southeast-2.amazonaws.com", - "", - true, - true, - "https://sdb.ap-southeast-2.amazonaws.com", - "https://sns.ap-southeast-2.amazonaws.com", - "https://sqs.ap-southeast-2.amazonaws.com", - "", - "https://iam.amazonaws.com", - "https://elasticloadbalancing.ap-southeast-2.amazonaws.com", - "https://kms.ap-southeast-2.amazonaws.com", - "https://dynamodb.ap-southeast-2.amazonaws.com", - ServiceInfo{"https://monitoring.ap-southeast-2.amazonaws.com", V2Signature}, - "https://autoscaling.ap-southeast-2.amazonaws.com", - ServiceInfo{"https://rds.ap-southeast-2.amazonaws.com", V2Signature}, 
- "https://kinesis.ap-southeast-2.amazonaws.com", - "https://sts.amazonaws.com", - "https://cloudformation.ap-southeast-2.amazonaws.com", - "https://elasticache.ap-southeast-2.amazonaws.com", -} - -var APNortheast = Region{ - "ap-northeast-1", - ServiceInfo{"https://ec2.ap-northeast-1.amazonaws.com", V2Signature}, - "https://s3-ap-northeast-1.amazonaws.com", - "", - true, - true, - "https://sdb.ap-northeast-1.amazonaws.com", - "https://sns.ap-northeast-1.amazonaws.com", - "https://sqs.ap-northeast-1.amazonaws.com", - "", - "https://iam.amazonaws.com", - "https://elasticloadbalancing.ap-northeast-1.amazonaws.com", - "https://kms.ap-northeast-1.amazonaws.com", - "https://dynamodb.ap-northeast-1.amazonaws.com", - ServiceInfo{"https://monitoring.ap-northeast-1.amazonaws.com", V2Signature}, - "https://autoscaling.ap-northeast-1.amazonaws.com", - ServiceInfo{"https://rds.ap-northeast-1.amazonaws.com", V2Signature}, - "https://kinesis.ap-northeast-1.amazonaws.com", - "https://sts.amazonaws.com", - "https://cloudformation.ap-northeast-1.amazonaws.com", - "https://elasticache.ap-northeast-1.amazonaws.com", -} - -var APNortheast2 = Region{ - "ap-northeast-2", - ServiceInfo{"https://ec2.ap-northeast-2.amazonaws.com", V2Signature}, - "https://s3-ap-northeast-2.amazonaws.com", - "", - true, - true, - "", - "https://sns.ap-northeast-2.amazonaws.com", - "https://sqs.ap-northeast-2.amazonaws.com", - "", - "https://iam.amazonaws.com", - "https://elasticloadbalancing.ap-northeast-2.amazonaws.com", - "https://kms.ap-northeast-2.amazonaws.com", - "https://dynamodb.ap-northeast-2.amazonaws.com", - ServiceInfo{"https://monitoring.ap-northeast-2.amazonaws.com", V2Signature}, - "https://autoscaling.ap-northeast-2.amazonaws.com", - ServiceInfo{"https://rds.ap-northeast-2.amazonaws.com", V2Signature}, - "https://kinesis.ap-northeast-2.amazonaws.com", - "https://sts.ap-northeast-2.amazonaws.com", - "https://cloudformation.ap-northeast-2.amazonaws.com", - "https://elasticache.ap-northeast-2.amazonaws.com", -} - -var SAEast = Region{ - "sa-east-1", - ServiceInfo{"https://ec2.sa-east-1.amazonaws.com", V2Signature}, - "https://s3-sa-east-1.amazonaws.com", - "", - true, - true, - "https://sdb.sa-east-1.amazonaws.com", - "https://sns.sa-east-1.amazonaws.com", - "https://sqs.sa-east-1.amazonaws.com", - "", - "https://iam.amazonaws.com", - "https://elasticloadbalancing.sa-east-1.amazonaws.com", - "https://kms.sa-east-1.amazonaws.com", - "https://dynamodb.sa-east-1.amazonaws.com", - ServiceInfo{"https://monitoring.sa-east-1.amazonaws.com", V2Signature}, - "https://autoscaling.sa-east-1.amazonaws.com", - ServiceInfo{"https://rds.sa-east-1.amazonaws.com", V2Signature}, - "", - "https://sts.amazonaws.com", - "https://cloudformation.sa-east-1.amazonaws.com", - "https://elasticache.sa-east-1.amazonaws.com", -} - -var CNNorth1 = Region{ - "cn-north-1", - ServiceInfo{"https://ec2.cn-north-1.amazonaws.com.cn", V2Signature}, - "https://s3.cn-north-1.amazonaws.com.cn", - "", - true, - true, - "", - "https://sns.cn-north-1.amazonaws.com.cn", - "https://sqs.cn-north-1.amazonaws.com.cn", - "", - "https://iam.cn-north-1.amazonaws.com.cn", - "https://elasticloadbalancing.cn-north-1.amazonaws.com.cn", - "", - "https://dynamodb.cn-north-1.amazonaws.com.cn", - ServiceInfo{"https://monitoring.cn-north-1.amazonaws.com.cn", V4Signature}, - "https://autoscaling.cn-north-1.amazonaws.com.cn", - ServiceInfo{"https://rds.cn-north-1.amazonaws.com.cn", V4Signature}, - "", - "https://sts.cn-north-1.amazonaws.com.cn", - "", - "", -} diff --git 
a/vendor/github.com/docker/goamz/aws/retry.go b/vendor/github.com/docker/goamz/aws/retry.go deleted file mode 100644 index bea964b9..00000000 --- a/vendor/github.com/docker/goamz/aws/retry.go +++ /dev/null @@ -1,136 +0,0 @@ -package aws - -import ( - "math/rand" - "net" - "net/http" - "time" -) - -const ( - maxDelay = 20 * time.Second - defaultScale = 300 * time.Millisecond - throttlingScale = 500 * time.Millisecond - throttlingScaleRange = throttlingScale / 4 - defaultMaxRetries = 3 - dynamoDBScale = 25 * time.Millisecond - dynamoDBMaxRetries = 10 -) - -// A RetryPolicy encapsulates a strategy for implementing client retries. -// -// Default implementations are provided which match the AWS SDKs. -type RetryPolicy interface { - // ShouldRetry returns whether a client should retry a failed request. - ShouldRetry(target string, r *http.Response, err error, numRetries int) bool - - // Delay returns the time a client should wait before issuing a retry. - Delay(target string, r *http.Response, err error, numRetries int) time.Duration -} - -// DefaultRetryPolicy implements the AWS SDK default retry policy. -// -// It will retry up to 3 times, and uses an exponential backoff with a scale -// factor of 300ms (300ms, 600ms, 1200ms). If the retry is because of -// throttling, the delay will also include some randomness. -// -// See https://github.com/aws/aws-sdk-java/blob/master/aws-java-sdk-core/src/main/java/com/amazonaws/retry/PredefinedRetryPolicies.java#L90. -type DefaultRetryPolicy struct { -} - -// ShouldRetry implements the RetryPolicy ShouldRetry method. -func (policy DefaultRetryPolicy) ShouldRetry(target string, r *http.Response, err error, numRetries int) bool { - return shouldRetry(r, err, numRetries, defaultMaxRetries) -} - -// Delay implements the RetryPolicy Delay method. -func (policy DefaultRetryPolicy) Delay(target string, r *http.Response, err error, numRetries int) time.Duration { - scale := defaultScale - if err, ok := err.(*Error); ok && isThrottlingException(err) { - scale = throttlingScale + time.Duration(rand.Int63n(int64(throttlingScaleRange))) - } - return exponentialBackoff(numRetries, scale) -} - -// DynamoDBRetryPolicy implements the AWS SDK DynamoDB retry policy. -// -// It will retry up to 10 times, and uses an exponential backoff with a scale -// factor of 25ms (25ms, 50ms, 100ms, ...). -// -// See https://github.com/aws/aws-sdk-java/blob/master/aws-java-sdk-core/src/main/java/com/amazonaws/retry/PredefinedRetryPolicies.java#L103. -type DynamoDBRetryPolicy struct { -} - -// ShouldRetry implements the RetryPolicy ShouldRetry method. -func (policy DynamoDBRetryPolicy) ShouldRetry(target string, r *http.Response, err error, numRetries int) bool { - return shouldRetry(r, err, numRetries, dynamoDBMaxRetries) -} - -// Delay implements the RetryPolicy Delay method. -func (policy DynamoDBRetryPolicy) Delay(target string, r *http.Response, err error, numRetries int) time.Duration { - return exponentialBackoff(numRetries, dynamoDBScale) -} - -// NeverRetryPolicy never retries requests and returns immediately on failure. -type NeverRetryPolicy struct { -} - -// ShouldRetry implements the RetryPolicy ShouldRetry method. -func (policy NeverRetryPolicy) ShouldRetry(target string, r *http.Response, err error, numRetries int) bool { - return false -} - -// Delay implements the RetryPolicy Delay method. 
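The backoff schedules documented above are easiest to see by evaluating a policy directly. Below is a minimal sketch of how calling code might consult the DefaultRetryPolicy from this deleted goamz package; the service name and retry counts are illustrative assumptions, not values from this repository. The remaining NeverRetryPolicy method continues after it.

```
package main

import (
	"fmt"

	"github.com/docker/goamz/aws"
)

func main() {
	var policy aws.RetryPolicy = aws.DefaultRetryPolicy{}
	// With a nil response and nil error neither the throttling nor the
	// 5xx branch applies, so the default 300ms scale is used and this
	// prints 300ms, 600ms, 1.2s.
	for retries := 0; retries < 3; retries++ {
		fmt.Println(policy.Delay("s3", nil, nil, retries))
	}
}
```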
-func (policy NeverRetryPolicy) Delay(target string, r *http.Response, err error, numRetries int) time.Duration { - return time.Duration(0) -} - -// shouldRetry determines if we should retry the request. -// -// See http://docs.aws.amazon.com/general/latest/gr/api-retries.html. -func shouldRetry(r *http.Response, err error, numRetries int, maxRetries int) bool { - // Once we've exceeded the max retry attempts, game over. - if numRetries >= maxRetries { - return false - } - - // Always retry temporary network errors. - if err, ok := err.(net.Error); ok && err.Temporary() { - return true - } - - // Always retry 5xx responses. - if r != nil && r.StatusCode >= 500 { - return true - } - - // Always retry throttling exceptions. - if err, ok := err.(ServiceError); ok && isThrottlingException(err) { - return true - } - - // Other classes of failures indicate a problem with the request. Retrying - // won't help. - return false -} - -func exponentialBackoff(numRetries int, scale time.Duration) time.Duration { - if numRetries < 0 { - return time.Duration(0) - } - - delay := (1 << uint(numRetries)) * scale - if delay > maxDelay { - return maxDelay - } - return delay -} - -func isThrottlingException(err ServiceError) bool { - switch err.ErrorCode() { - case "Throttling", "ThrottlingException", "ProvisionedThroughputExceededException": - return true - default: - return false - } -} diff --git a/vendor/github.com/docker/goamz/aws/sign.go b/vendor/github.com/docker/goamz/aws/sign.go deleted file mode 100644 index 8b53e8e8..00000000 --- a/vendor/github.com/docker/goamz/aws/sign.go +++ /dev/null @@ -1,472 +0,0 @@ -package aws - -import ( - "bytes" - "crypto/hmac" - "crypto/sha256" - "encoding/base64" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "path" - "sort" - "strings" - "time" -) - -// AWS specifies that the parameters in a signed request must -// be provided in the natural order of the keys. This is distinct -// from the natural order of the encoded value of key=value. -// Percent and gocheck.Equals affect the sorting order. -func EncodeSorted(values url.Values) string { - // preallocate the arrays for performance - keys := make([]string, 0, len(values)) - sarray := make([]string, 0, len(values)) - for k := range values { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - for _, v := range values[k] { - sarray = append(sarray, Encode(k)+"="+Encode(v)) - } - } - - return strings.Join(sarray, "&") -} - -type V2Signer struct { - auth Auth - service ServiceInfo - host string -} - -var b64 = base64.StdEncoding - -func NewV2Signer(auth Auth, service ServiceInfo) (*V2Signer, error) { - u, err := url.Parse(service.Endpoint) - if err != nil { - return nil, err - } - return &V2Signer{auth: auth, service: service, host: u.Host}, nil -} - -func (s *V2Signer) Sign(method, path string, params map[string]string) { - params["AWSAccessKeyId"] = s.auth.AccessKey - params["SignatureVersion"] = "2" - params["SignatureMethod"] = "HmacSHA256" - if s.auth.Token() != "" { - params["SecurityToken"] = s.auth.Token() - } - // AWS specifies that the parameters in a signed request must - // be provided in the natural order of the keys. This is distinct - // from the natural order of the encoded value of key=value. - // Percent and gocheck.Equals affect the sorting order.
- var keys, sarray []string - for k := range params { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - sarray = append(sarray, Encode(k)+"="+Encode(params[k])) - } - joined := strings.Join(sarray, "&") - payload := method + "\n" + s.host + "\n" + path + "\n" + joined - hash := hmac.New(sha256.New, []byte(s.auth.SecretKey)) - hash.Write([]byte(payload)) - signature := make([]byte, b64.EncodedLen(hash.Size())) - b64.Encode(signature, hash.Sum(nil)) - - params["Signature"] = string(signature) -} - -func (s *V2Signer) SignRequest(req *http.Request) error { - req.ParseForm() - req.Form.Set("AWSAccessKeyId", s.auth.AccessKey) - req.Form.Set("SignatureVersion", "2") - req.Form.Set("SignatureMethod", "HmacSHA256") - if s.auth.Token() != "" { - req.Form.Set("SecurityToken", s.auth.Token()) - } - - payload := req.Method + "\n" + req.URL.Host + "\n" + req.URL.Path + "\n" + EncodeSorted(req.Form) - hash := hmac.New(sha256.New, []byte(s.auth.SecretKey)) - hash.Write([]byte(payload)) - signature := make([]byte, b64.EncodedLen(hash.Size())) - b64.Encode(signature, hash.Sum(nil)) - - req.Form.Set("Signature", string(signature)) - - req.URL.RawQuery = req.Form.Encode() - - return nil -} - -// Common date formats for signing requests -const ( - ISO8601BasicFormat = "20060102T150405Z" - ISO8601BasicFormatShort = "20060102" -) - -type Route53Signer struct { - auth Auth -} - -func NewRoute53Signer(auth Auth) *Route53Signer { - return &Route53Signer{auth: auth} -} - -// Creates the authorization signature based on the date stamp and secret key -func (s *Route53Signer) getHeaderAuthorize(message string) string { - hmacSha256 := hmac.New(sha256.New, []byte(s.auth.SecretKey)) - hmacSha256.Write([]byte(message)) - cryptedString := hmacSha256.Sum(nil) - - return base64.StdEncoding.EncodeToString(cryptedString) -} - -// Adds all the required headers for the AWS Route53 API to the request, -// including the authorization -func (s *Route53Signer) Sign(req *http.Request) { - date := time.Now().UTC().Format(time.RFC1123) - delete(req.Header, "Date") - req.Header.Set("Date", date) - - authHeader := fmt.Sprintf("AWS3-HTTPS AWSAccessKeyId=%s,Algorithm=%s,Signature=%s", - s.auth.AccessKey, "HmacSHA256", s.getHeaderAuthorize(date)) - - req.Header.Set("Host", req.Host) - req.Header.Set("X-Amzn-Authorization", authHeader) - req.Header.Set("Content-Type", "application/xml") - if s.auth.Token() != "" { - req.Header.Set("X-Amz-Security-Token", s.auth.Token()) - } -} - -/* -The V4Signer encapsulates all of the functionality to sign a request with the AWS -Signature Version 4 Signing Process. (http://goo.gl/u1OWZz) -*/ -type V4Signer struct { - auth Auth - serviceName string - region Region - // Add the x-amz-content-sha256 header - IncludeXAmzContentSha256 bool -} - -/* -Return a new instance of a V4Signer capable of signing AWS requests. -*/ -func NewV4Signer(auth Auth, serviceName string, region Region) *V4Signer { - return &V4Signer{ - auth: auth, - serviceName: serviceName, - region: region, - IncludeXAmzContentSha256: false, - } -} - -/* -Sign a request according to the AWS Signature Version 4 Signing Process. (http://goo.gl/u1OWZz) - -The signed request will include an "x-amz-date" header with a current timestamp if a valid "x-amz-date" -or "date" header was not available in the original request. In addition, AWS Signature Version 4 requires -the "host" header to be a signed header, therefore the Sign method will manually set a "host" header from -the request.Host.
- -The signed request will include a new "Authorization" header indicating that the request has been signed. - -Any changes to the request after signing the request will invalidate the signature. -*/ -func (s *V4Signer) Sign(req *http.Request) { - req.Header.Set("host", req.Host) // host header must be included as a signed header - t := s.requestTime(req) // Get request time - - payloadHash := "" - - if _, ok := req.Form["X-Amz-Expires"]; ok { - // We are authenticating the request by using query params - // (also known as pre-signing a URL, http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html) - payloadHash = "UNSIGNED-PAYLOAD" - req.Header.Del("x-amz-date") - - req.Form["X-Amz-SignedHeaders"] = []string{s.signedHeaders(req.Header)} - req.Form["X-Amz-Algorithm"] = []string{"AWS4-HMAC-SHA256"} - req.Form["X-Amz-Credential"] = []string{s.auth.AccessKey + "/" + s.credentialScope(t)} - req.Form["X-Amz-Date"] = []string{t.Format(ISO8601BasicFormat)} - req.URL.RawQuery = req.Form.Encode() - } else { - payloadHash = s.payloadHash(req) - if s.IncludeXAmzContentSha256 { - req.Header.Set("x-amz-content-sha256", payloadHash) // x-amz-content-sha256 contains the payload hash - } - } - creq := s.canonicalRequest(req, payloadHash) // Build canonical request - sts := s.stringToSign(t, creq) // Build string to sign - signature := s.signature(t, sts) // Calculate the AWS Signature Version 4 - auth := s.authorization(req.Header, t, signature) // Create Authorization header value - - if _, ok := req.Form["X-Amz-Expires"]; ok { - req.Form["X-Amz-Signature"] = []string{signature} - } else { - req.Header.Set("Authorization", auth) // Add Authorization header to request - } - return -} - -func (s *V4Signer) SignRequest(req *http.Request) error { - s.Sign(req) - return nil -} - -/* -requestTime method will parse the time from the request "x-amz-date" or "date" headers. -If the "x-amz-date" header is present, that will take priority over the "date" header. -If neither header is defined or we are unable to parse either header as a valid date -then we will create a new "x-amz-date" header with the current time. -*/ -func (s *V4Signer) requestTime(req *http.Request) time.Time { - - // Get "x-amz-date" header - date := req.Header.Get("x-amz-date") - - // Attempt to parse as ISO8601BasicFormat - t, err := time.Parse(ISO8601BasicFormat, date) - if err == nil { - return t - } - - // Attempt to parse as http.TimeFormat - t, err = time.Parse(http.TimeFormat, date) - if err == nil { - req.Header.Set("x-amz-date", t.Format(ISO8601BasicFormat)) - return t - } - - // Get "date" header - date = req.Header.Get("date") - - // Attempt to parse as http.TimeFormat - t, err = time.Parse(http.TimeFormat, date) - if err == nil { - return t - } - - // Create a current time header to be used - t = time.Now().UTC() - req.Header.Set("x-amz-date", t.Format(ISO8601BasicFormat)) - return t -} - -/* -canonicalRequest method creates the canonical request according to Task 1 of the AWS Signature Version 4 Signing Process.
(http://goo.gl/eUUZ3S) - - CanonicalRequest = - HTTPRequestMethod + '\n' + - CanonicalURI + '\n' + - CanonicalQueryString + '\n' + - CanonicalHeaders + '\n' + - SignedHeaders + '\n' + - HexEncode(Hash(Payload)) - -payloadHash is optional; use the empty string and it will be calculated from the request -*/ -func (s *V4Signer) canonicalRequest(req *http.Request, payloadHash string) string { - if payloadHash == "" { - payloadHash = s.payloadHash(req) - } - c := new(bytes.Buffer) - fmt.Fprintf(c, "%s\n", req.Method) - fmt.Fprintf(c, "%s\n", s.canonicalURI(req.URL)) - fmt.Fprintf(c, "%s\n", s.canonicalQueryString(req.URL)) - fmt.Fprintf(c, "%s\n\n", s.canonicalHeaders(req.Header)) - fmt.Fprintf(c, "%s\n", s.signedHeaders(req.Header)) - fmt.Fprintf(c, "%s", payloadHash) - return c.String() -} - -func (s *V4Signer) canonicalURI(u *url.URL) string { - u = &url.URL{Path: u.Path} - canonicalPath := u.String() - - slash := strings.HasSuffix(canonicalPath, "/") - canonicalPath = path.Clean(canonicalPath) - - if canonicalPath == "" || canonicalPath == "." { - canonicalPath = "/" - } - - if canonicalPath != "/" && slash { - canonicalPath += "/" - } - - return canonicalPath -} - -func (s *V4Signer) canonicalQueryString(u *url.URL) string { - keyValues := make(map[string]string, len(u.Query())) - keys := make([]string, len(u.Query())) - - key_i := 0 - for k, vs := range u.Query() { - k = url.QueryEscape(k) - - a := make([]string, len(vs)) - for idx, v := range vs { - v = url.QueryEscape(v) - a[idx] = fmt.Sprintf("%s=%s", k, v) - } - - keyValues[k] = strings.Join(a, "&") - keys[key_i] = k - key_i++ - } - - sort.Strings(keys) - - query := make([]string, len(keys)) - for idx, key := range keys { - query[idx] = keyValues[key] - } - - query_str := strings.Join(query, "&") - - // AWS V4 signing requires that the space characters - // are encoded as %20 instead of +. On the other hand - // Go's url.QueryEscape as well as url.Values.Encode() - // both encode the space as a + character. See: - // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html - // https://github.com/golang/go/issues/4013 - // https://groups.google.com/forum/#!topic/golang-nuts/BB443qEjPIk - - return strings.Replace(query_str, "+", "%20", -1) -} - -func (s *V4Signer) canonicalHeaders(h http.Header) string { - i, a, lowerCase := 0, make([]string, len(h)), make(map[string][]string) - - for k, v := range h { - lowerCase[strings.ToLower(k)] = v - } - - var keys []string - for k := range lowerCase { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - v := lowerCase[k] - for j, w := range v { - v[j] = strings.Trim(w, " ") - } - sort.Strings(v) - a[i] = strings.ToLower(k) + ":" + strings.Join(v, ",") - i++ - } - return strings.Join(a, "\n") -} - -func (s *V4Signer) signedHeaders(h http.Header) string { - i, a := 0, make([]string, len(h)) - for k := range h { - a[i] = strings.ToLower(k) - i++ - } - sort.Strings(a) - return strings.Join(a, ";") -} - -func (s *V4Signer) payloadHash(req *http.Request) string { - var b []byte - if req.Body == nil { - b = []byte("") - } else { - var err error - b, err = ioutil.ReadAll(req.Body) - if err != nil { - // TODO: I REALLY DON'T LIKE THIS PANIC!!!! - panic(err) - } - } - req.Body = ioutil.NopCloser(bytes.NewBuffer(b)) - return s.hash(string(b)) -} - -/* -stringToSign method creates the string to sign according to Task 2 of the AWS Signature Version 4 Signing Process.
(http://goo.gl/es1PAu) - - StringToSign = - Algorithm + '\n' + - RequestDate + '\n' + - CredentialScope + '\n' + - HexEncode(Hash(CanonicalRequest)) -*/ -func (s *V4Signer) stringToSign(t time.Time, creq string) string { - w := new(bytes.Buffer) - fmt.Fprint(w, "AWS4-HMAC-SHA256\n") - fmt.Fprintf(w, "%s\n", t.Format(ISO8601BasicFormat)) - fmt.Fprintf(w, "%s\n", s.credentialScope(t)) - fmt.Fprintf(w, "%s", s.hash(creq)) - return w.String() -} - -func (s *V4Signer) credentialScope(t time.Time) string { - return fmt.Sprintf("%s/%s/%s/aws4_request", t.Format(ISO8601BasicFormatShort), s.region.Name, s.serviceName) -} - -/* -signature method calculates the AWS Signature Version 4 according to Task 3 of the AWS Signature Version 4 Signing Process. (http://goo.gl/j0Yqe1) - - signature = HexEncode(HMAC(derived-signing-key, string-to-sign)) -*/ -func (s *V4Signer) signature(t time.Time, sts string) string { - h := s.hmac(s.derivedKey(t), []byte(sts)) - return fmt.Sprintf("%x", h) -} - -/* -derivedKey method derives a signing key to be used for signing a request. - - kSecret = Your AWS Secret Access Key - kDate = HMAC("AWS4" + kSecret, Date) - kRegion = HMAC(kDate, Region) - kService = HMAC(kRegion, Service) - kSigning = HMAC(kService, "aws4_request") -*/ -func (s *V4Signer) derivedKey(t time.Time) []byte { - h := s.hmac([]byte("AWS4"+s.auth.SecretKey), []byte(t.Format(ISO8601BasicFormatShort))) - h = s.hmac(h, []byte(s.region.Name)) - h = s.hmac(h, []byte(s.serviceName)) - h = s.hmac(h, []byte("aws4_request")) - return h -} - -/* -authorization method generates the authorization header value. -*/ -func (s *V4Signer) authorization(header http.Header, t time.Time, signature string) string { - w := new(bytes.Buffer) - fmt.Fprint(w, "AWS4-HMAC-SHA256 ") - fmt.Fprintf(w, "Credential=%s/%s, ", s.auth.AccessKey, s.credentialScope(t)) - fmt.Fprintf(w, "SignedHeaders=%s, ", s.signedHeaders(header)) - fmt.Fprintf(w, "Signature=%s", signature) - return w.String() -} - -// hash method calculates the sha256 hash for a given string -func (s *V4Signer) hash(in string) string { - h := sha256.New() - fmt.Fprintf(h, "%s", in) - return fmt.Sprintf("%x", h.Sum(nil)) -} - -// hmac method calculates the sha256 hmac for a given slice of bytes -func (s *V4Signer) hmac(key, data []byte) []byte { - h := hmac.New(sha256.New, key) - h.Write(data) - return h.Sum(nil) -} diff --git a/vendor/github.com/docker/goamz/s3/lifecycle.go b/vendor/github.com/docker/goamz/s3/lifecycle.go deleted file mode 100644 index d9281261..00000000 --- a/vendor/github.com/docker/goamz/s3/lifecycle.go +++ /dev/null @@ -1,202 +0,0 @@ -package s3 - -import ( - "crypto/md5" - "encoding/base64" - "encoding/xml" - "net/url" - "strconv" - "time" -) - -// Implements an interface for s3 bucket lifecycle configuration -// See goo.gl/d0bbDf for details. - -const ( - LifecycleRuleStatusEnabled = "Enabled" - LifecycleRuleStatusDisabled = "Disabled" - LifecycleRuleDateFormat = "2006-01-02" - StorageClassGlacier = "GLACIER" -) - -type Expiration struct { - Days *uint `xml:"Days,omitempty"` - Date string `xml:"Date,omitempty"` -} - -// Returns Date as a time.Time. -func (r *Expiration) ParseDate() (time.Time, error) { - return time.Parse(LifecycleRuleDateFormat, r.Date) -} - -type Transition struct { - Days *uint `xml:"Days,omitempty"` - Date string `xml:"Date,omitempty"` - StorageClass string `xml:"StorageClass"` -} - -// Returns Date as a time.Time. 
-func (r *Transition) ParseDate() (time.Time, error) { - return time.Parse(LifecycleRuleDateFormat, r.Date) -} - -type NoncurrentVersionExpiration struct { - Days *uint `xml:"NoncurrentDays,omitempty"` -} - -type NoncurrentVersionTransition struct { - Days *uint `xml:"NoncurrentDays,omitempty"` - StorageClass string `xml:"StorageClass"` -} - -type LifecycleRule struct { - ID string `xml:"ID"` - Prefix string `xml:"Prefix"` - Status string `xml:"Status"` - NoncurrentVersionTransition *NoncurrentVersionTransition `xml:"NoncurrentVersionTransition,omitempty"` - NoncurrentVersionExpiration *NoncurrentVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty"` - Transition *Transition `xml:"Transition,omitempty"` - Expiration *Expiration `xml:"Expiration,omitempty"` -} - -// Create a lifecycle rule with arbitrary identifier id and object name prefix -// for which the rules should apply. -func NewLifecycleRule(id, prefix string) *LifecycleRule { - rule := &LifecycleRule{ - ID: id, - Prefix: prefix, - Status: LifecycleRuleStatusEnabled, - } - return rule -} - -// Adds a transition rule in days. Overwrites any previous transition rule. -func (r *LifecycleRule) SetTransitionDays(days uint) { - r.Transition = &Transition{ - Days: &days, - StorageClass: StorageClassGlacier, - } -} - -// Adds a transition rule as a date. Overwrites any previous transition rule. -func (r *LifecycleRule) SetTransitionDate(date time.Time) { - r.Transition = &Transition{ - Date: date.Format(LifecycleRuleDateFormat), - StorageClass: StorageClassGlacier, - } -} - -// Adds an expiration rule in days. Overwrites any previous expiration rule. -// Days must be > 0. -func (r *LifecycleRule) SetExpirationDays(days uint) { - r.Expiration = &Expiration{ - Days: &days, - } -} - -// Adds an expiration rule as a date. Overwrites any previous expiration rule. -func (r *LifecycleRule) SetExpirationDate(date time.Time) { - r.Expiration = &Expiration{ - Date: date.Format(LifecycleRuleDateFormat), - } -} - -// Adds a noncurrent version transition rule. Overwrites any previous -// noncurrent version transition rule. -func (r *LifecycleRule) SetNoncurrentVersionTransitionDays(days uint) { - r.NoncurrentVersionTransition = &NoncurrentVersionTransition{ - Days: &days, - StorageClass: StorageClassGlacier, - } -} - -// Adds a noncurrent version expiration rule. Days must be > 0. Overwrites -// any previous noncurrent version expiration rule. -func (r *LifecycleRule) SetNoncurrentVersionExpirationDays(days uint) { - r.NoncurrentVersionExpiration = &NoncurrentVersionExpiration{ - Days: &days, - } -} - -// Marks the rule as disabled. -func (r *LifecycleRule) Disable() { - r.Status = LifecycleRuleStatusDisabled -} - -// Marks the rule as enabled (default). -func (r *LifecycleRule) Enable() { - r.Status = LifecycleRuleStatusEnabled -} - -type LifecycleConfiguration struct { - XMLName xml.Name `xml:"LifecycleConfiguration"` - Rules *[]*LifecycleRule `xml:"Rule,omitempty"` -} - -// Adds a LifecycleRule to the configuration. -func (c *LifecycleConfiguration) AddRule(r *LifecycleRule) { - var rules []*LifecycleRule - if c.Rules != nil { - rules = *c.Rules - } - rules = append(rules, r) - c.Rules = &rules -} - -// Sets the bucket's lifecycle configuration. 
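Taken together, the rule builders above compose into a full configuration. The following is a minimal usage sketch, assuming placeholder credentials, bucket name, and retention windows; the PutLifecycleConfiguration method it calls is defined next.

```
package main

import (
	"log"

	"github.com/docker/goamz/aws"
	"github.com/docker/goamz/s3"
)

func main() {
	// Placeholder credentials and bucket name.
	auth := aws.Auth{AccessKey: "ACCESS", SecretKey: "SECRET"}
	bucket := s3.New(auth, aws.USEast).Bucket("my-bucket")

	// Archive objects under "logs/" to Glacier after 30 days,
	// then expire them after 365 days.
	rule := s3.NewLifecycleRule("archive-logs", "logs/")
	rule.SetTransitionDays(30)
	rule.SetExpirationDays(365)

	conf := &s3.LifecycleConfiguration{}
	conf.AddRule(rule)
	if err := bucket.PutLifecycleConfiguration(conf); err != nil {
		log.Fatal(err)
	}
}
```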
-func (b *Bucket) PutLifecycleConfiguration(c *LifecycleConfiguration) error { - doc, err := xml.Marshal(c) - if err != nil { - return err - } - - buf := makeXmlBuffer(doc) - digest := md5.New() - size, err := digest.Write(buf.Bytes()) - if err != nil { - return err - } - - headers := map[string][]string{ - "Content-Length": {strconv.FormatInt(int64(size), 10)}, - "Content-MD5": {base64.StdEncoding.EncodeToString(digest.Sum(nil))}, - } - - req := &request{ - path: "/", - method: "PUT", - bucket: b.Name, - headers: headers, - payload: buf, - params: url.Values{"lifecycle": {""}}, - } - - return b.S3.queryV4Sign(req, nil) -} - -// Retrieves the lifecycle configuration for the bucket. AWS returns an error -// if no lifecycle found. -func (b *Bucket) GetLifecycleConfiguration() (*LifecycleConfiguration, error) { - req := &request{ - method: "GET", - bucket: b.Name, - path: "/", - params: url.Values{"lifecycle": {""}}, - } - - conf := &LifecycleConfiguration{} - err := b.S3.queryV4Sign(req, conf) - return conf, err -} - -// Delete the bucket's lifecycle configuration. -func (b *Bucket) DeleteLifecycleConfiguration() error { - req := &request{ - method: "DELETE", - bucket: b.Name, - path: "/", - params: url.Values{"lifecycle": {""}}, - } - - return b.S3.queryV4Sign(req, nil) -} diff --git a/vendor/github.com/docker/goamz/s3/multi.go b/vendor/github.com/docker/goamz/s3/multi.go deleted file mode 100644 index d905f565..00000000 --- a/vendor/github.com/docker/goamz/s3/multi.go +++ /dev/null @@ -1,508 +0,0 @@ -package s3 - -import ( - "bytes" - "crypto/md5" - "encoding/base64" - "encoding/hex" - "encoding/xml" - "errors" - "io" - "net/http" - "net/url" - "sort" - "strconv" - "strings" -) - -// Multi represents an unfinished multipart upload. -// -// Multipart uploads allow sending big objects in smaller chunks. -// After all parts have been sent, the upload must be explicitly -// completed by calling Complete with the list of parts. -// -// See http://goo.gl/vJfTG for an overview of multipart uploads. -type Multi struct { - Bucket *Bucket - Key string - UploadId string -} - -// That's the default. Here just for testing. -var listMultiMax = 1000 - -type listMultiResp struct { - NextKeyMarker string - NextUploadIdMarker string - IsTruncated bool - Upload []Multi - CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` -} - -// ListMulti returns the list of unfinished multipart uploads in b. -// -// The prefix parameter limits the response to keys that begin with the -// specified prefix. You can use prefixes to separate a bucket into different -// groupings of keys (to get the feeling of folders, for example). -// -// The delim parameter causes the response to group all of the keys that -// share a common prefix up to the next delimiter in a single entry within -// the CommonPrefixes field. You can use delimiters to separate a bucket -// into different groupings of keys, similar to how folders would work. -// -// See http://goo.gl/ePioY for details. 
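As a usage sketch of the prefix and delimiter semantics just described, reusing the bucket handle from the previous sketch (the prefix and delimiter values are assumptions), ahead of the ListMulti implementation below:

```
// multis holds unfinished uploads under "photos/"; prefixes holds the
// deeper "directory" groupings collapsed at the next "/".
multis, prefixes, err := bucket.ListMulti("photos/", "/")
if err != nil {
	log.Fatal(err)
}
for _, m := range multis {
	log.Printf("unfinished: %s (upload id %s)", m.Key, m.UploadId)
}
log.Printf("grouped prefixes: %v", prefixes)
```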
-func (b *Bucket) ListMulti(prefix, delim string) (multis []*Multi, prefixes []string, err error) { - params := map[string][]string{ - "uploads": {""}, - "max-uploads": {strconv.FormatInt(int64(listMultiMax), 10)}, - "prefix": {prefix}, - "delimiter": {delim}, - } - for attempt := attempts.Start(); attempt.Next(); { - req := &request{ - method: "GET", - bucket: b.Name, - params: params, - } - var resp listMultiResp - err := b.S3.query(req, &resp) - if shouldRetry(err) && attempt.HasNext() { - continue - } - if err != nil { - return nil, nil, err - } - for i := range resp.Upload { - multi := &resp.Upload[i] - multi.Bucket = b - multis = append(multis, multi) - } - prefixes = append(prefixes, resp.CommonPrefixes...) - if !resp.IsTruncated { - return multis, prefixes, nil - } - params["key-marker"] = []string{resp.NextKeyMarker} - params["upload-id-marker"] = []string{resp.NextUploadIdMarker} - attempt = attempts.Start() // Last request worked. - } - panic("unreachable") -} - -// Multi returns a multipart upload handler for the provided key -// inside b. If a multipart upload exists for key, it is returned, -// otherwise a new multipart upload is initiated with contType and perm. -func (b *Bucket) Multi(key, contType string, perm ACL, options Options) (*Multi, error) { - multis, _, err := b.ListMulti(key, "") - if err != nil && !hasCode(err, "NoSuchUpload") { - return nil, err - } - for _, m := range multis { - if m.Key == key { - return m, nil - } - } - return b.InitMulti(key, contType, perm, options) -} - -// InitMulti initializes a new multipart upload at the provided -// key inside b and returns a value for manipulating it. -// -// See http://goo.gl/XP8kL for details. -func (b *Bucket) InitMulti(key string, contType string, perm ACL, options Options) (*Multi, error) { - headers := map[string][]string{ - "Content-Type": {contType}, - "Content-Length": {"0"}, - "x-amz-acl": {string(perm)}, - } - options.addHeaders(headers) - params := map[string][]string{ - "uploads": {""}, - } - req := &request{ - method: "POST", - bucket: b.Name, - path: key, - headers: headers, - params: params, - } - var err error - var resp struct { - UploadId string `xml:"UploadId"` - } - for attempt := attempts.Start(); attempt.Next(); { - err = b.S3.query(req, &resp) - if !shouldRetry(err) { - break - } - } - if err != nil { - return nil, err - } - return &Multi{Bucket: b, Key: key, UploadId: resp.UploadId}, nil -} - -func (m *Multi) PutPartCopy(n int, options CopyOptions, source string) (*CopyObjectResult, Part, error) { - headers := map[string][]string{ - "x-amz-copy-source": {url.QueryEscape(source)}, - } - options.addHeaders(headers) - params := map[string][]string{ - "uploadId": {m.UploadId}, - "partNumber": {strconv.FormatInt(int64(n), 10)}, - } - - sourceBucket := m.Bucket.S3.Bucket(strings.TrimRight(strings.SplitAfterN(source, "/", 2)[0], "/")) - sourceMeta, err := sourceBucket.Head(strings.SplitAfterN(source, "/", 2)[1], nil) - if err != nil { - return nil, Part{}, err - } - - for attempt := attempts.Start(); attempt.Next(); { - req := &request{ - method: "PUT", - bucket: m.Bucket.Name, - path: m.Key, - headers: headers, - params: params, - } - resp := &CopyObjectResult{} - err = m.Bucket.S3.query(req, resp) - if shouldRetry(err) && attempt.HasNext() { - continue - } - if err != nil { - return nil, Part{}, err - } - if resp.ETag == "" { - return nil, Part{}, errors.New("part upload succeeded with no ETag") - } - return resp, Part{n, resp.ETag, sourceMeta.ContentLength}, nil - } - panic("unreachable") -} - 
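PutPartCopy above fills a part from an existing object; for the common upload path, the methods in this file compose as in the hedged end-to-end sketch below (credentials, bucket, key, and payload are placeholders). The 5MB minimum documented next applies to every part except the last, so a single short part is valid.

```
package main

import (
	"bytes"
	"log"

	"github.com/docker/goamz/aws"
	"github.com/docker/goamz/s3"
)

func main() {
	auth := aws.Auth{AccessKey: "ACCESS", SecretKey: "SECRET"} // placeholders
	bucket := s3.New(auth, aws.USEast).Bucket("my-bucket")

	// Initiate the upload, send one part, then assemble the object.
	m, err := bucket.InitMulti("big-object", "application/octet-stream", s3.Private, s3.Options{})
	if err != nil {
		log.Fatal(err)
	}
	// A single part is also the last part, so it may be under 5MB.
	part, err := m.PutPart(1, bytes.NewReader([]byte("payload")))
	if err != nil {
		log.Fatal(err)
	}
	if err := m.Complete([]s3.Part{part}); err != nil {
		log.Fatal(err)
	}
}
```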
-// PutPart sends part n of the multipart upload, reading all the content from r. -// Each part, except for the last one, must be at least 5MB in size. -// -// See http://goo.gl/pqZer for details. -func (m *Multi) PutPart(n int, r io.ReadSeeker) (Part, error) { - partSize, _, md5b64, err := seekerInfo(r) - if err != nil { - return Part{}, err - } - return m.putPart(n, r, partSize, md5b64) -} - -func (m *Multi) putPart(n int, r io.ReadSeeker, partSize int64, md5b64 string) (Part, error) { - headers := map[string][]string{ - "Content-Length": {strconv.FormatInt(partSize, 10)}, - "Content-MD5": {md5b64}, - } - params := map[string][]string{ - "uploadId": {m.UploadId}, - "partNumber": {strconv.FormatInt(int64(n), 10)}, - } - for attempt := attempts.Start(); attempt.Next(); { - _, err := r.Seek(0, 0) - if err != nil { - return Part{}, err - } - req := &request{ - method: "PUT", - bucket: m.Bucket.Name, - path: m.Key, - headers: headers, - params: params, - payload: r, - } - err = m.Bucket.S3.prepare(req) - if err != nil { - return Part{}, err - } - resp, err := m.Bucket.S3.run(req, nil) - if shouldRetry(err) && attempt.HasNext() { - continue - } - if err != nil { - return Part{}, err - } - etag := resp.Header.Get("ETag") - if etag == "" { - return Part{}, errors.New("part upload succeeded with no ETag") - } - return Part{n, etag, partSize}, nil - } - panic("unreachable") -} - -func seekerInfo(r io.ReadSeeker) (size int64, md5hex string, md5b64 string, err error) { - _, err = r.Seek(0, 0) - if err != nil { - return 0, "", "", err - } - digest := md5.New() - size, err = io.Copy(digest, r) - if err != nil { - return 0, "", "", err - } - sum := digest.Sum(nil) - md5hex = hex.EncodeToString(sum) - md5b64 = base64.StdEncoding.EncodeToString(sum) - return size, md5hex, md5b64, nil -} - -type Part struct { - N int `xml:"PartNumber"` - ETag string - Size int64 -} - -type partSlice []Part - -func (s partSlice) Len() int { return len(s) } -func (s partSlice) Less(i, j int) bool { return s[i].N < s[j].N } -func (s partSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -type listPartsResp struct { - NextPartNumberMarker string - IsTruncated bool - Part []Part -} - -// That's the default. Here just for testing. -var listPartsMax = 1000 - -// Kept for backwards compatibility. See the documentation for ListPartsFull. -func (m *Multi) ListParts() ([]Part, error) { - return m.ListPartsFull(0, listPartsMax) -} - -// ListPartsFull returns the list of previously uploaded parts in m, -// ordered by part number (only parts with higher part numbers than -// partNumberMarker will be listed). Only up to maxParts parts will be -// returned. -// -// See http://goo.gl/ePioY for details. -func (m *Multi) ListPartsFull(partNumberMarker int, maxParts int) ([]Part, error) { - if maxParts > listPartsMax { - maxParts = listPartsMax - } - - params := map[string][]string{ - "uploadId": {m.UploadId}, - "max-parts": {strconv.FormatInt(int64(maxParts), 10)}, - "part-number-marker": {strconv.FormatInt(int64(partNumberMarker), 10)}, - } - var parts partSlice - for attempt := attempts.Start(); attempt.Next(); { - req := &request{ - method: "GET", - bucket: m.Bucket.Name, - path: m.Key, - params: params, - } - var resp listPartsResp - err := m.Bucket.S3.query(req, &resp) - if shouldRetry(err) && attempt.HasNext() { - continue - } - if err != nil { - return nil, err - } - parts = append(parts, resp.Part...)
- if !resp.IsTruncated { - sort.Sort(parts) - return parts, nil - } - params["part-number-marker"] = []string{resp.NextPartNumberMarker} - attempt = attempts.Start() // Last request worked. - } - panic("unreachable") -} - -type ReaderAtSeeker interface { - io.ReaderAt - io.ReadSeeker -} - -// PutAll sends all of r via a multipart upload with parts no larger -// than partSize bytes, which must be set to at least 5MB. -// Parts previously uploaded are either reused if their checksum -// and size match the new part, or otherwise overwritten with the -// new content. -// PutAll returns all the parts of m (reused or not). -func (m *Multi) PutAll(r ReaderAtSeeker, partSize int64) ([]Part, error) { - old, err := m.ListParts() - if err != nil && !hasCode(err, "NoSuchUpload") { - return nil, err - } - reuse := 0 // Index of next old part to consider reusing. - current := 1 // Part number of latest good part handled. - totalSize, err := r.Seek(0, 2) - if err != nil { - return nil, err - } - first := true // Must send at least one empty part if the file is empty. - var result []Part -NextSection: - for offset := int64(0); offset < totalSize || first; offset += partSize { - first = false - if offset+partSize > totalSize { - partSize = totalSize - offset - } - section := io.NewSectionReader(r, offset, partSize) - _, md5hex, md5b64, err := seekerInfo(section) - if err != nil { - return nil, err - } - for reuse < len(old) && old[reuse].N <= current { - // Looks like this part was already sent. - part := &old[reuse] - etag := `"` + md5hex + `"` - if part.N == current && part.Size == partSize && part.ETag == etag { - // Checksum matches. Reuse the old part. - result = append(result, *part) - current++ - continue NextSection - } - reuse++ - } - - // Part wasn't found or doesn't match. Send it. - part, err := m.putPart(current, section, partSize, md5b64) - if err != nil { - return nil, err - } - result = append(result, part) - current++ - } - return result, nil -} - -type completeUpload struct { - XMLName xml.Name `xml:"CompleteMultipartUpload"` - Parts completeParts `xml:"Part"` -} - -type completePart struct { - PartNumber int - ETag string -} - -type completeParts []completePart - -func (p completeParts) Len() int { return len(p) } -func (p completeParts) Less(i, j int) bool { return p[i].PartNumber < p[j].PartNumber } -func (p completeParts) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -// We can't know in advance whether we'll have an Error or a -// CompleteMultipartUploadResult, so this structure is just a placeholder to -// know the name of the XML object. -type completeUploadResp struct { - XMLName xml.Name - InnerXML string `xml:",innerxml"` -} - -// Complete assembles the given previously uploaded parts into the -// final object. This operation may take several minutes. -// -// See http://goo.gl/2Z7Tw for details. 
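Because PutAll reuses previously sent parts whose checksum and size match, restarting an interrupted transfer amounts to running it again. Below is a sketch assuming a hypothetical local file and the Multi handle m from the earlier sketch, placed ahead of the Complete implementation that follows.

```
f, err := os.Open("big-object.bin") // hypothetical local file
if err != nil {
	log.Fatal(err)
}
defer f.Close()

// Upload in 5MB parts, reusing any matching parts already present
// server-side, then assemble the final object.
parts, err := m.PutAll(f, 5*1024*1024)
if err != nil {
	log.Fatal(err)
}
if err := m.Complete(parts); err != nil {
	log.Fatal(err)
}
```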
-func (m *Multi) Complete(parts []Part) error { - params := map[string][]string{ - "uploadId": {m.UploadId}, - } - c := completeUpload{} - for _, p := range parts { - c.Parts = append(c.Parts, completePart{p.N, p.ETag}) - } - sort.Sort(c.Parts) - data, err := xml.Marshal(&c) - if err != nil { - return err - } - for attempt := attempts.Start(); attempt.Next(); { - req := &request{ - method: "POST", - bucket: m.Bucket.Name, - path: m.Key, - params: params, - payload: bytes.NewReader(data), - } - var resp completeUploadResp - if m.Bucket.Region.Name == "generic" { - headers := make(http.Header) - headers.Add("Content-Length", strconv.FormatInt(int64(len(data)), 10)) - req.headers = headers - } - err := m.Bucket.S3.query(req, &resp) - if shouldRetry(err) && attempt.HasNext() { - continue - } - - if err != nil { - return err - } - - // A 200 status code does not guarantee that there were no errors (see - // http://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadComplete.html ), - // so first figure out what kind of XML "object" we are dealing with. - - if resp.XMLName.Local == "Error" { - // S3.query does the unmarshalling for us, so we can't unmarshal - // again in a different struct... So we need to duct-tape the - // original XML back together. - fullErrorXml := "<Error>" + resp.InnerXML + "</Error>" - s3err := &Error{} - - if err := xml.Unmarshal([]byte(fullErrorXml), s3err); err != nil { - return err - } - - return s3err - } - - if resp.XMLName.Local == "CompleteMultipartUploadResult" { - // FIXME: One could probably add a CompleteFull method returning the - // actual contents of the CompleteMultipartUploadResult object. - return nil - } - - return errors.New("Invalid XML struct returned: " + resp.XMLName.Local) - } - panic("unreachable") -} - -// Abort deletes an unfinished multipart upload and any previously -// uploaded parts for it. -// -// After a multipart upload is aborted, no additional parts can be -// uploaded using it. However, if any part uploads are currently in -// progress, those part uploads might or might not succeed. As a result, -// it might be necessary to abort a given multipart upload multiple -// times in order to completely free all storage consumed by all parts. -// -// NOTE: If the described scenario happens to you, please report back to -// the goamz authors with details. In the future such retrying should be -// handled internally, but it's not clear what happens precisely (Is an -// error returned? Is the issue completely undetectable?). -// -// See http://goo.gl/dnyJw for details. -func (m *Multi) Abort() error { - params := map[string][]string{ - "uploadId": {m.UploadId}, - } - for attempt := attempts.Start(); attempt.Next(); { - req := &request{ - method: "DELETE", - bucket: m.Bucket.Name, - path: m.Key, - params: params, - } - err := m.Bucket.S3.query(req, nil) - if shouldRetry(err) && attempt.HasNext() { - continue - } - return err - } - panic("unreachable") -} diff --git a/vendor/github.com/docker/goamz/s3/s3.go b/vendor/github.com/docker/goamz/s3/s3.go deleted file mode 100644 index 5dedaf10..00000000 --- a/vendor/github.com/docker/goamz/s3/s3.go +++ /dev/null @@ -1,1305 +0,0 @@ -// -// goamz - Go packages to interact with the Amazon Web Services. -// -// https://wiki.ubuntu.com/goamz -// -// Copyright (c) 2011 Canonical Ltd.
-// -// Written by Gustavo Niemeyer -// - -package s3 - -import ( - "bytes" - "crypto/hmac" - "crypto/md5" - "crypto/sha1" - "encoding/base64" - "encoding/xml" - "fmt" - "io" - "io/ioutil" - "log" - "net" - "net/http" - "net/http/httputil" - "net/url" - "path" - "strconv" - "strings" - "time" - - "github.com/docker/goamz/aws" -) - -const debug = false - -// The S3 type encapsulates operations with an S3 region. -type S3 struct { - aws.Auth - aws.Region - Signature int - Client *http.Client - private byte // Reserve the right of using private data. -} - -// The Bucket type encapsulates operations with an S3 bucket. -type Bucket struct { - *S3 - Name string -} - -// The Owner type represents the owner of the object in an S3 bucket. -type Owner struct { - ID string - DisplayName string -} - -// Fold options into an Options struct -// -type Options struct { - SSE bool - SSEKMS bool - SSEKMSKeyId string - SSECustomerAlgorithm string - SSECustomerKey string - SSECustomerKeyMD5 string - Meta map[string][]string - ContentEncoding string - CacheControl string - RedirectLocation string - ContentMD5 string - ContentDisposition string - Range string - StorageClass StorageClass - // What else? -} - -type CopyOptions struct { - Options - CopySourceOptions string - MetadataDirective string - ContentType string -} - -// CopyObjectResult is the output from a Copy request -type CopyObjectResult struct { - ETag string - LastModified string -} - -var attempts = aws.AttemptStrategy{ - Min: 5, - Total: 5 * time.Second, - Delay: 200 * time.Millisecond, -} - -// New creates a new S3. -func New(auth aws.Auth, region aws.Region) *S3 { - return &S3{ - Auth: auth, - Region: region, - Signature: aws.V2Signature, - Client: http.DefaultClient, - private: 0, - } -} - -// Bucket returns a Bucket with the given name. -func (s3 *S3) Bucket(name string) *Bucket { - if s3.Region.S3BucketEndpoint != "" || s3.Region.S3LowercaseBucket { - name = strings.ToLower(name) - } - return &Bucket{s3, name} -} - -type BucketInfo struct { - Name string - CreationDate string -} - -type GetServiceResp struct { - Owner Owner - Buckets []BucketInfo `xml:">Bucket"` -} - -// GetService gets a list of all buckets owned by an account. -// -// See http://goo.gl/wbHkGj for details. -func (s3 *S3) GetService() (*GetServiceResp, error) { - bucket := s3.Bucket("") - - r, err := bucket.Get("") - if err != nil { - return nil, err - } - - // Parse the XML response. - var resp GetServiceResp - if err = xml.Unmarshal(r, &resp); err != nil { - return nil, err - } - - return &resp, nil -} - -var createBucketConfiguration = `<CreateBucketConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"> - <LocationConstraint>%s</LocationConstraint> -</CreateBucketConfiguration>` - -// locationConstraint returns an io.Reader specifying a LocationConstraint if -// required for the region. -// -// See http://goo.gl/bh9Kq for details.
-func (s3 *S3) locationConstraint() io.Reader { - constraint := "" - if s3.Region.S3LocationConstraint { - constraint = fmt.Sprintf(createBucketConfiguration, s3.Region.Name) - } - return strings.NewReader(constraint) -} - -type ACL string - -const ( - Private = ACL("private") - PublicRead = ACL("public-read") - PublicReadWrite = ACL("public-read-write") - AuthenticatedRead = ACL("authenticated-read") - BucketOwnerRead = ACL("bucket-owner-read") - BucketOwnerFull = ACL("bucket-owner-full-control") -) - -type StorageClass string - -const ( - ReducedRedundancy = StorageClass("REDUCED_REDUNDANCY") - StandardStorage = StorageClass("STANDARD") -) - -type ServerSideEncryption string - -const ( - S3Managed = ServerSideEncryption("AES256") - KMSManaged = ServerSideEncryption("aws:kms") -) - -// PutBucket creates a new bucket. -// -// See http://goo.gl/ndjnR for details. -func (b *Bucket) PutBucket(perm ACL) error { - headers := map[string][]string{ - "x-amz-acl": {string(perm)}, - } - req := &request{ - method: "PUT", - bucket: b.Name, - path: "/", - headers: headers, - payload: b.locationConstraint(), - } - return b.S3.query(req, nil) -} - -// DelBucket removes an existing S3 bucket. All objects in the bucket must -// be removed before the bucket itself can be removed. -// -// See http://goo.gl/GoBrY for details. -func (b *Bucket) DelBucket() (err error) { - req := &request{ - method: "DELETE", - bucket: b.Name, - path: "/", - } - for attempt := attempts.Start(); attempt.Next(); { - err = b.S3.query(req, nil) - if !shouldRetry(err) { - break - } - } - return err -} - -// Get retrieves an object from an S3 bucket. -// -// See http://goo.gl/isCO7 for details. -func (b *Bucket) Get(path string) (data []byte, err error) { - body, err := b.GetReader(path) - if err != nil { - return nil, err - } - data, err = ioutil.ReadAll(body) - body.Close() - return data, err -} - -// GetReader retrieves an object from an S3 bucket, -// returning the body of the HTTP response. -// It is the caller's responsibility to call Close on rc when -// finished reading. -func (b *Bucket) GetReader(path string) (rc io.ReadCloser, err error) { - resp, err := b.GetResponse(path) - if resp != nil { - return resp.Body, err - } - return nil, err -} - -// GetResponse retrieves an object from an S3 bucket, -// returning the HTTP response. -// It is the caller's responsibility to call Close on rc when -// finished reading -func (b *Bucket) GetResponse(path string) (resp *http.Response, err error) { - return b.GetResponseWithHeaders(path, make(http.Header)) -} - -// GetReaderWithHeaders retrieves an object from an S3 bucket -// Accepts custom headers to be sent as the second parameter -// returning the body of the HTTP response. -// It is the caller's responsibility to call Close on rc when -// finished reading -func (b *Bucket) GetResponseWithHeaders(path string, headers map[string][]string) (resp *http.Response, err error) { - req := &request{ - bucket: b.Name, - path: path, - headers: headers, - } - err = b.S3.prepare(req) - if err != nil { - return nil, err - } - for attempt := attempts.Start(); attempt.Next(); { - resp, err := b.S3.run(req, nil) - if shouldRetry(err) && attempt.HasNext() { - continue - } - if err != nil { - return nil, err - } - return resp, nil - } - panic("unreachable") -} - -// Exists checks whether or not an object exists on an S3 bucket using a HEAD request. 
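Ahead of the Exists implementation below, the following is a round-trip sketch of the basic object helpers in this file, reusing the earlier bucket handle; the key, content type, and payload are placeholder assumptions.

```
// Write an object, check that it exists, and read it back.
if err := bucket.Put("greeting.txt", []byte("hello"), "text/plain", s3.Private, s3.Options{}); err != nil {
	log.Fatal(err)
}
exists, err := bucket.Exists("greeting.txt")
if err != nil {
	log.Fatal(err)
}
data, err := bucket.Get("greeting.txt")
if err != nil {
	log.Fatal(err)
}
log.Printf("exists=%v body=%s", exists, data)
```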
-func (b *Bucket) Exists(path string) (exists bool, err error) { - req := &request{ - method: "HEAD", - bucket: b.Name, - path: path, - } - err = b.S3.prepare(req) - if err != nil { - return - } - for attempt := attempts.Start(); attempt.Next(); { - resp, err := b.S3.run(req, nil) - - if shouldRetry(err) && attempt.HasNext() { - continue - } - - if err != nil { - // We can treat a 403 or 404 as non-existence - if e, ok := err.(*Error); ok && (e.StatusCode == 403 || e.StatusCode == 404) { - return false, nil - } - return false, err - } - - if resp.StatusCode/100 == 2 { - exists = true - } - if resp.Body != nil { - resp.Body.Close() - } - return exists, err - } - return false, fmt.Errorf("S3 Currently Unreachable") -} - -// Head HEADs an object in the S3 bucket and returns the response with -// no body. See http://bit.ly/17K1ylI for details. -func (b *Bucket) Head(path string, headers map[string][]string) (*http.Response, error) { - req := &request{ - method: "HEAD", - bucket: b.Name, - path: path, - headers: headers, - } - err := b.S3.prepare(req) - if err != nil { - return nil, err - } - - for attempt := attempts.Start(); attempt.Next(); { - resp, err := b.S3.run(req, nil) - if shouldRetry(err) && attempt.HasNext() { - continue - } - if err != nil { - return nil, err - } - return resp, err - } - return nil, fmt.Errorf("S3 Currently Unreachable") -} - -// Put inserts an object into the S3 bucket. -// -// See http://goo.gl/FEBPD for details. -func (b *Bucket) Put(path string, data []byte, contType string, perm ACL, options Options) error { - body := bytes.NewBuffer(data) - return b.PutReader(path, body, int64(len(data)), contType, perm, options) -} - -// PutCopy puts a copy of an object given by the key path into bucket b using b.Path as the target key -func (b *Bucket) PutCopy(path string, perm ACL, options CopyOptions, source string) (*CopyObjectResult, error) { - headers := map[string][]string{ - "x-amz-acl": {string(perm)}, - "x-amz-copy-source": {escapePath(source)}, - } - options.addHeaders(headers) - req := &request{ - method: "PUT", - bucket: b.Name, - path: path, - headers: headers, - } - resp := &CopyObjectResult{} - err := b.S3.query(req, resp) - if err != nil { - return resp, err - } - return resp, nil -} - -// PutReader inserts an object into the S3 bucket by consuming data -// from r until EOF.
-func (b *Bucket) PutReader(path string, r io.Reader, length int64, contType string, perm ACL, options Options) error { - headers := map[string][]string{ - "Content-Length": {strconv.FormatInt(length, 10)}, - "Content-Type": {contType}, - "x-amz-acl": {string(perm)}, - } - options.addHeaders(headers) - req := &request{ - method: "PUT", - bucket: b.Name, - path: path, - headers: headers, - payload: r, - } - return b.S3.query(req, nil) -} - -// addHeaders adds o's specified fields to headers -func (o Options) addHeaders(headers map[string][]string) { - if o.SSE { - headers["x-amz-server-side-encryption"] = []string{string(S3Managed)} - } else if o.SSEKMS { - headers["x-amz-server-side-encryption"] = []string{string(KMSManaged)} - if len(o.SSEKMSKeyId) != 0 { - headers["x-amz-server-side-encryption-aws-kms-key-id"] = []string{o.SSEKMSKeyId} - } - } else if len(o.SSECustomerAlgorithm) != 0 && len(o.SSECustomerKey) != 0 && len(o.SSECustomerKeyMD5) != 0 { - // Amazon-managed keys and customer-managed keys are mutually exclusive - headers["x-amz-server-side-encryption-customer-algorithm"] = []string{o.SSECustomerAlgorithm} - headers["x-amz-server-side-encryption-customer-key"] = []string{o.SSECustomerKey} - headers["x-amz-server-side-encryption-customer-key-MD5"] = []string{o.SSECustomerKeyMD5} - } - if len(o.Range) != 0 { - headers["Range"] = []string{o.Range} - } - if len(o.ContentEncoding) != 0 { - headers["Content-Encoding"] = []string{o.ContentEncoding} - } - if len(o.CacheControl) != 0 { - headers["Cache-Control"] = []string{o.CacheControl} - } - if len(o.ContentMD5) != 0 { - headers["Content-MD5"] = []string{o.ContentMD5} - } - if len(o.RedirectLocation) != 0 { - headers["x-amz-website-redirect-location"] = []string{o.RedirectLocation} - } - if len(o.ContentDisposition) != 0 { - headers["Content-Disposition"] = []string{o.ContentDisposition} - } - if len(o.StorageClass) != 0 { - headers["x-amz-storage-class"] = []string{string(o.StorageClass)} - - } - for k, v := range o.Meta { - headers["x-amz-meta-"+k] = v - } -} - -// addHeaders adds o's specified fields to headers -func (o CopyOptions) addHeaders(headers map[string][]string) { - o.Options.addHeaders(headers) - if len(o.MetadataDirective) != 0 { - headers["x-amz-metadata-directive"] = []string{o.MetadataDirective} - } - if len(o.CopySourceOptions) != 0 { - headers["x-amz-copy-source-range"] = []string{o.CopySourceOptions} - } - if len(o.ContentType) != 0 { - headers["Content-Type"] = []string{o.ContentType} - } -} - -func makeXmlBuffer(doc []byte) *bytes.Buffer { - buf := new(bytes.Buffer) - buf.WriteString(xml.Header) - buf.Write(doc) - return buf -} - -type IndexDocument struct { - Suffix string `xml:"Suffix"` -} - -type ErrorDocument struct { - Key string `xml:"Key"` -} - -type RoutingRule struct { - ConditionKeyPrefixEquals string `xml:"Condition>KeyPrefixEquals"` - RedirectReplaceKeyPrefixWith string `xml:"Redirect>ReplaceKeyPrefixWith,omitempty"` - RedirectReplaceKeyWith string `xml:"Redirect>ReplaceKeyWith,omitempty"` -} - -type RedirectAllRequestsTo struct { - HostName string `xml:"HostName"` - Protocol string `xml:"Protocol,omitempty"` -} - -type WebsiteConfiguration struct { - XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ WebsiteConfiguration"` - IndexDocument *IndexDocument `xml:"IndexDocument,omitempty"` - ErrorDocument *ErrorDocument `xml:"ErrorDocument,omitempty"` - RoutingRules *[]RoutingRule `xml:"RoutingRules>RoutingRule,omitempty"` - RedirectAllRequestsTo *RedirectAllRequestsTo 
`xml:"RedirectAllRequestsTo,omitempty"` -} - -// PutBucketWebsite configures a bucket as a website. -// -// See http://goo.gl/TpRlUy for details. -func (b *Bucket) PutBucketWebsite(configuration WebsiteConfiguration) error { - doc, err := xml.Marshal(configuration) - if err != nil { - return err - } - - buf := makeXmlBuffer(doc) - - return b.PutBucketSubresource("website", buf, int64(buf.Len())) -} - -func (b *Bucket) PutBucketSubresource(subresource string, r io.Reader, length int64) error { - headers := map[string][]string{ - "Content-Length": {strconv.FormatInt(length, 10)}, - } - req := &request{ - path: "/", - method: "PUT", - bucket: b.Name, - headers: headers, - payload: r, - params: url.Values{subresource: {""}}, - } - - return b.S3.query(req, nil) -} - -// Del removes an object from the S3 bucket. -// -// See http://goo.gl/APeTt for details. -func (b *Bucket) Del(path string) error { - req := &request{ - method: "DELETE", - bucket: b.Name, - path: path, - } - return b.S3.query(req, nil) -} - -type Delete struct { - Quiet bool `xml:"Quiet,omitempty"` - Objects []Object `xml:"Object"` -} - -type Object struct { - Key string `xml:"Key"` - VersionId string `xml:"VersionId,omitempty"` -} - -// DelMulti removes up to 1000 objects from the S3 bucket. -// -// See http://goo.gl/jx6cWK for details. -func (b *Bucket) DelMulti(objects Delete) error { - doc, err := xml.Marshal(objects) - if err != nil { - return err - } - - buf := makeXmlBuffer(doc) - digest := md5.New() - size, err := digest.Write(buf.Bytes()) - if err != nil { - return err - } - - headers := map[string][]string{ - "Content-Length": {strconv.FormatInt(int64(size), 10)}, - "Content-MD5": {base64.StdEncoding.EncodeToString(digest.Sum(nil))}, - "Content-Type": {"text/xml"}, - } - req := &request{ - path: "/", - method: "POST", - params: url.Values{"delete": {""}}, - bucket: b.Name, - headers: headers, - payload: buf, - } - - return b.S3.query(req, nil) -} - -// The ListResp type holds the results of a List bucket operation. -type ListResp struct { - Name string - Prefix string - Delimiter string - Marker string - MaxKeys int - // IsTruncated is true if the results have been truncated because - // there are more keys and prefixes than can fit in MaxKeys. - // N.B. this is the opposite sense to that documented (incorrectly) in - // http://goo.gl/YjQTc - IsTruncated bool - Contents []Key - CommonPrefixes []string `xml:">Prefix"` - // if IsTruncated is true, pass NextMarker as marker argument to List() - // to get the next set of keys - NextMarker string -} - -// The Key type represents an item stored in an S3 bucket. -type Key struct { - Key string - LastModified string - Size int64 - // ETag gives the hex-encoded MD5 sum of the contents, - // surrounded with double-quotes. - ETag string - StorageClass string - Owner Owner -} - -// List returns information about objects in an S3 bucket. -// -// The prefix parameter limits the response to keys that begin with the -// specified prefix. -// -// The delim parameter causes the response to group all of the keys that -// share a common prefix up to the next delimiter in a single entry within -// the CommonPrefixes field. You can use delimiters to separate a bucket -// into different groupings of keys, similar to how folders would work. -// -// The marker parameter specifies the key to start with when listing objects -// in a bucket. Amazon S3 lists objects in alphabetical order and -// will return keys alphabetically greater than the marker. 
-// -// The max parameter specifies how many keys + common prefixes to return in -// the response. The default is 1000. -// -// For example, given these keys in a bucket: -// -// index.html -// index2.html -// photos/2006/January/sample.jpg -// photos/2006/February/sample2.jpg -// photos/2006/February/sample3.jpg -// photos/2006/February/sample4.jpg -// -// Listing this bucket with delimiter set to "/" would yield the -// following result: -// -// &ListResp{ -// Name: "sample-bucket", -// MaxKeys: 1000, -// Delimiter: "/", -// Contents: []Key{ -// {Key: "index.html", "index2.html"}, -// }, -// CommonPrefixes: []string{ -// "photos/", -// }, -// } -// -// Listing the same bucket with delimiter set to "/" and prefix set to -// "photos/2006/" would yield the following result: -// -// &ListResp{ -// Name: "sample-bucket", -// MaxKeys: 1000, -// Delimiter: "/", -// Prefix: "photos/2006/", -// CommonPrefixes: []string{ -// "photos/2006/February/", -// "photos/2006/January/", -// }, -// } -// -// See http://goo.gl/YjQTc for details. -func (b *Bucket) List(prefix, delim, marker string, max int) (result *ListResp, err error) { - params := map[string][]string{ - "prefix": {prefix}, - "delimiter": {delim}, - "marker": {marker}, - } - if max != 0 { - params["max-keys"] = []string{strconv.FormatInt(int64(max), 10)} - } - req := &request{ - bucket: b.Name, - params: params, - } - result = &ListResp{} - for attempt := attempts.Start(); attempt.Next(); { - err = b.S3.query(req, result) - if !shouldRetry(err) { - break - } - } - if err != nil { - return nil, err - } - // if NextMarker is not returned, it should be set to the name of last key, - // so let's do it so that each caller doesn't have to - if result.IsTruncated && result.NextMarker == "" { - n := len(result.Contents) - if n > 0 { - result.NextMarker = result.Contents[n-1].Key - } - } - return result, nil -} - -// The VersionsResp type holds the results of a list bucket Versions operation. -type VersionsResp struct { - Name string - Prefix string - KeyMarker string - VersionIdMarker string - MaxKeys int - Delimiter string - IsTruncated bool - Versions []Version `xml:"Version"` - CommonPrefixes []string `xml:">Prefix"` -} - -// The Version type represents an object version stored in an S3 bucket. -type Version struct { - Key string - VersionId string - IsLatest bool - LastModified string - // ETag gives the hex-encoded MD5 sum of the contents, - // surrounded with double-quotes. - ETag string - Size int64 - Owner Owner - StorageClass string -} - -func (b *Bucket) Versions(prefix, delim, keyMarker string, versionIdMarker string, max int) (result *VersionsResp, err error) { - params := map[string][]string{ - "versions": {""}, - "prefix": {prefix}, - "delimiter": {delim}, - } - - if len(versionIdMarker) != 0 { - params["version-id-marker"] = []string{versionIdMarker} - } - if len(keyMarker) != 0 { - params["key-marker"] = []string{keyMarker} - } - - if max != 0 { - params["max-keys"] = []string{strconv.FormatInt(int64(max), 10)} - } - req := &request{ - bucket: b.Name, - params: params, - } - result = &VersionsResp{} - for attempt := attempts.Start(); attempt.Next(); { - err = b.S3.query(req, result) - if !shouldRetry(err) { - break - } - } - if err != nil { - return nil, err - } - return result, nil -} - -type GetLocationResp struct { - Location string `xml:",innerxml"` -} - -func (b *Bucket) Location() (string, error) { - r, err := b.Get("/?location") - if err != nil { - return "", err - } - - // Parse the XML response. 
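// S3 returns an empty LocationConstraint for buckets in the default region,
// hence the us-east-1 fallback below.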
- var resp GetLocationResp - if err = xml.Unmarshal(r, &resp); err != nil { - return "", err - } - - if resp.Location == "" { - return "us-east-1", nil - } else { - return resp.Location, nil - } -} - -// URL returns a non-signed URL that allows retriving the -// object at path. It only works if the object is publicly -// readable (see SignedURL). -func (b *Bucket) URL(path string) string { - req := &request{ - bucket: b.Name, - path: path, - } - err := b.S3.prepare(req) - if err != nil { - panic(err) - } - u, err := req.url() - if err != nil { - panic(err) - } - u.RawQuery = "" - return u.String() -} - -// SignedURL returns a signed URL that allows anyone holding the URL -// to retrieve the object at path. The signature is valid until expires. -func (b *Bucket) SignedURL(path string, expires time.Time) string { - return b.SignedURLWithArgs(path, expires, nil, nil) -} - -// SignedURLWithArgs returns a signed URL that allows anyone holding the URL -// to retrieve the object at path. The signature is valid until expires. -func (b *Bucket) SignedURLWithArgs(path string, expires time.Time, params url.Values, headers http.Header) string { - return b.SignedURLWithMethod("GET", path, expires, params, headers) -} - -// SignedURLWithMethod returns a signed URL that allows anyone holding the URL -// to either retrieve the object at path or make a HEAD request against it. The signature is valid until expires. -func (b *Bucket) SignedURLWithMethod(method, path string, expires time.Time, params url.Values, headers http.Header) string { - var uv = url.Values{} - - if params != nil { - uv = params - } - - if b.S3.Signature == aws.V2Signature { - uv.Set("Expires", strconv.FormatInt(expires.Unix(), 10)) - } else { - uv.Set("X-Amz-Expires", strconv.FormatInt(expires.Unix()-time.Now().Unix(), 10)) - } - - req := &request{ - method: method, - bucket: b.Name, - path: path, - params: uv, - headers: headers, - } - err := b.S3.prepare(req) - if err != nil { - panic(err) - } - u, err := req.url() - if err != nil { - panic(err) - } - if b.S3.Auth.Token() != "" && b.S3.Signature == aws.V2Signature { - return u.String() + "&x-amz-security-token=" + url.QueryEscape(req.headers["X-Amz-Security-Token"][0]) - } else { - return u.String() - } -} - -// UploadSignedURL returns a signed URL that allows anyone holding the URL -// to upload the object at path. The signature is valid until expires. 
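// The string that is signed covers the HTTP method, the Content-Type, and the
// expiry, so the eventual upload request must use the same method and
// Content-Type for the signature to match.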
-// contenttype is a string like image/png -// name is the resource name in s3 terminology like images/ali.png [obviously excluding the bucket name itself] -func (b *Bucket) UploadSignedURL(name, method, content_type string, expires time.Time) string { - expire_date := expires.Unix() - if method != "POST" { - method = "PUT" - } - - a := b.S3.Auth - tokenData := "" - - if a.Token() != "" { - tokenData = "x-amz-security-token:" + a.Token() + "\n" - } - - stringToSign := method + "\n\n" + content_type + "\n" + strconv.FormatInt(expire_date, 10) + "\n" + tokenData + "/" + path.Join(b.Name, name) - secretKey := a.SecretKey - accessId := a.AccessKey - mac := hmac.New(sha1.New, []byte(secretKey)) - mac.Write([]byte(stringToSign)) - macsum := mac.Sum(nil) - signature := base64.StdEncoding.EncodeToString([]byte(macsum)) - signature = strings.TrimSpace(signature) - - var signedurl *url.URL - var err error - if b.Region.S3Endpoint != "" { - signedurl, err = url.Parse(b.Region.S3Endpoint) - name = b.Name + "/" + name - } else { - signedurl, err = url.Parse("https://" + b.Name + ".s3.amazonaws.com/") - } - - if err != nil { - log.Println("ERROR sining url for S3 upload", err) - return "" - } - signedurl.Path = name - params := url.Values{} - params.Add("AWSAccessKeyId", accessId) - params.Add("Expires", strconv.FormatInt(expire_date, 10)) - params.Add("Signature", signature) - if a.Token() != "" { - params.Add("x-amz-security-token", a.Token()) - } - - signedurl.RawQuery = params.Encode() - return signedurl.String() -} - -// PostFormArgs returns the action and input fields needed to allow anonymous -// uploads to a bucket within the expiration limit -// Additional conditions can be specified with conds -func (b *Bucket) PostFormArgsEx(path string, expires time.Time, redirect string, conds []string) (action string, fields map[string]string) { - conditions := make([]string, 0) - fields = map[string]string{ - "AWSAccessKeyId": b.Auth.AccessKey, - "key": path, - } - - if token := b.S3.Auth.Token(); token != "" { - fields["x-amz-security-token"] = token - conditions = append(conditions, - fmt.Sprintf("{\"x-amz-security-token\": \"%s\"}", token)) - } - - if conds != nil { - conditions = append(conditions, conds...) 
- } - - conditions = append(conditions, fmt.Sprintf("{\"key\": \"%s\"}", path)) - conditions = append(conditions, fmt.Sprintf("{\"bucket\": \"%s\"}", b.Name)) - if redirect != "" { - conditions = append(conditions, fmt.Sprintf("{\"success_action_redirect\": \"%s\"}", redirect)) - fields["success_action_redirect"] = redirect - } - - vExpiration := expires.Format("2006-01-02T15:04:05Z") - vConditions := strings.Join(conditions, ",") - policy := fmt.Sprintf("{\"expiration\": \"%s\", \"conditions\": [%s]}", vExpiration, vConditions) - policy64 := base64.StdEncoding.EncodeToString([]byte(policy)) - fields["policy"] = policy64 - - signer := hmac.New(sha1.New, []byte(b.Auth.SecretKey)) - signer.Write([]byte(policy64)) - fields["signature"] = base64.StdEncoding.EncodeToString(signer.Sum(nil)) - - action = fmt.Sprintf("%s/%s/", b.S3.Region.S3Endpoint, b.Name) - return -} - -// PostFormArgs returns the action and input fields needed to allow anonymous -// uploads to a bucket within the expiration limit -func (b *Bucket) PostFormArgs(path string, expires time.Time, redirect string) (action string, fields map[string]string) { - return b.PostFormArgsEx(path, expires, redirect, nil) -} - -type request struct { - method string - bucket string - path string - params url.Values - headers http.Header - baseurl string - payload io.Reader - prepared bool -} - -func (req *request) url() (*url.URL, error) { - u, err := url.Parse(req.baseurl) - if err != nil { - return nil, fmt.Errorf("bad S3 endpoint URL %q: %v", req.baseurl, err) - } - u.RawQuery = req.params.Encode() - u.Path = req.path - return u, nil -} - -// query prepares and runs the req request. -// If resp is not nil, the XML data contained in the response -// body will be unmarshalled on it. -func (s3 *S3) query(req *request, resp interface{}) error { - err := s3.prepare(req) - if err != nil { - return err - } - r, err := s3.run(req, resp) - if r != nil && r.Body != nil { - r.Body.Close() - } - return err -} - -// queryV4Signprepares and runs the req request, signed with aws v4 signatures. -// If resp is not nil, the XML data contained in the response -// body will be unmarshalled on it. -func (s3 *S3) queryV4Sign(req *request, resp interface{}) error { - if req.headers == nil { - req.headers = map[string][]string{} - } - - err := s3.setBaseURL(req) - if err != nil { - return err - } - - hreq, err := s3.setupHttpRequest(req) - if err != nil { - return err - } - - // req.Host must be set for V4 signature calculation - hreq.Host = hreq.URL.Host - - signer := aws.NewV4Signer(s3.Auth, "s3", s3.Region) - signer.IncludeXAmzContentSha256 = true - signer.Sign(hreq) - - _, err = s3.doHttpRequest(hreq, resp) - return err -} - -// Sets baseurl on req from bucket name and the region endpoint -func (s3 *S3) setBaseURL(req *request) error { - if req.bucket == "" { - req.baseurl = s3.Region.S3Endpoint - } else { - req.baseurl = s3.Region.S3BucketEndpoint - if req.baseurl == "" { - // Use the path method to address the bucket. - req.baseurl = s3.Region.S3Endpoint - req.path = "/" + req.bucket + req.path - } else { - // Just in case, prevent injection. - if strings.IndexAny(req.bucket, "/:@") >= 0 { - return fmt.Errorf("bad S3 bucket: %q", req.bucket) - } - req.baseurl = strings.Replace(req.baseurl, "${bucket}", req.bucket, -1) - } - } - - return nil -} - -// partiallyEscapedPath partially escapes the S3 path allowing for all S3 REST API calls. 
-// -// Some commands including: -// GET Bucket acl http://goo.gl/aoXflF -// GET Bucket cors http://goo.gl/UlmBdx -// GET Bucket lifecycle http://goo.gl/8Fme7M -// GET Bucket policy http://goo.gl/ClXIo3 -// GET Bucket location http://goo.gl/5lh8RD -// GET Bucket Logging http://goo.gl/sZ5ckF -// GET Bucket notification http://goo.gl/qSSZKD -// GET Bucket tagging http://goo.gl/QRvxnM -// require the first character after the bucket name in the path to be a literal '?' and -// not the escaped hex representation '%3F'. -func partiallyEscapedPath(path string) string { - pathEscapedAndSplit := strings.Split((&url.URL{Path: path}).String(), "/") - if len(pathEscapedAndSplit) >= 3 { - if len(pathEscapedAndSplit[2]) >= 3 { - // Check for the one "?" that should not be escaped. - if pathEscapedAndSplit[2][0:3] == "%3F" { - pathEscapedAndSplit[2] = "?" + pathEscapedAndSplit[2][3:] - } - } - } - return strings.Replace(strings.Join(pathEscapedAndSplit, "/"), "+", "%2B", -1) -} - -// prepare sets up req to be delivered to S3. -func (s3 *S3) prepare(req *request) error { - // Copy so they can be mutated without affecting on retries. - params := make(url.Values) - headers := make(http.Header) - for k, v := range req.params { - params[k] = v - } - for k, v := range req.headers { - headers[k] = v - } - req.params = params - req.headers = headers - - if !req.prepared { - req.prepared = true - if req.method == "" { - req.method = "GET" - } - - if !strings.HasPrefix(req.path, "/") { - req.path = "/" + req.path - } - - err := s3.setBaseURL(req) - if err != nil { - return err - } - } - - if s3.Signature == aws.V2Signature && s3.Auth.Token() != "" { - req.headers["X-Amz-Security-Token"] = []string{s3.Auth.Token()} - } else if s3.Auth.Token() != "" { - req.params.Set("X-Amz-Security-Token", s3.Auth.Token()) - } - - if s3.Signature == aws.V2Signature { - // Always sign again as it's not clear how far the - // server has handled a previous attempt. 
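// (A V2 signature covers the Date header, which is refreshed below on every
// attempt, so re-signing always yields a signature valid for the current time.)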
- u, err := url.Parse(req.baseurl) - if err != nil { - return err - } - - signpathPartiallyEscaped := partiallyEscapedPath(req.path) - if strings.IndexAny(s3.Region.S3BucketEndpoint, "${bucket}") >= 0 { - signpathPartiallyEscaped = "/" + req.bucket + signpathPartiallyEscaped - } - req.headers["Host"] = []string{u.Host} - req.headers["Date"] = []string{time.Now().In(time.UTC).Format(time.RFC1123)} - - sign(s3.Auth, req.method, signpathPartiallyEscaped, req.params, req.headers) - } else { - hreq, err := s3.setupHttpRequest(req) - if err != nil { - return err - } - - hreq.Host = hreq.URL.Host - signer := aws.NewV4Signer(s3.Auth, "s3", s3.Region) - signer.IncludeXAmzContentSha256 = true - signer.Sign(hreq) - - req.payload = hreq.Body - if _, ok := headers["Content-Length"]; ok { - req.headers["Content-Length"] = headers["Content-Length"] - } - } - return nil -} - -// Prepares an *http.Request for doHttpRequest -func (s3 *S3) setupHttpRequest(req *request) (*http.Request, error) { - // Copy so that signing the http request will not mutate it - headers := make(http.Header) - for k, v := range req.headers { - headers[k] = v - } - req.headers = headers - - u, err := req.url() - if err != nil { - return nil, err - } - if s3.Region.Name != "generic" { - u.Opaque = fmt.Sprintf("//%s%s", u.Host, partiallyEscapedPath(u.Path)) - } - - hreq := http.Request{ - URL: u, - Method: req.method, - ProtoMajor: 1, - ProtoMinor: 1, - Header: req.headers, - Form: req.params, - } - - if v, ok := req.headers["Content-Length"]; ok { - hreq.ContentLength, _ = strconv.ParseInt(v[0], 10, 64) - delete(req.headers, "Content-Length") - } - if req.payload != nil { - hreq.Body = ioutil.NopCloser(req.payload) - } - - return &hreq, nil -} - -// doHttpRequest sends hreq and returns the http response from the server. -// If resp is not nil, the XML data contained in the response -// body will be unmarshalled on it. -func (s3 *S3) doHttpRequest(hreq *http.Request, resp interface{}) (*http.Response, error) { - hresp, err := s3.Client.Do(hreq) - if err != nil { - return nil, err - } - if debug { - dump, _ := httputil.DumpResponse(hresp, true) - log.Printf("} -> %s\n", dump) - } - if hresp.StatusCode != 200 && hresp.StatusCode != 204 && hresp.StatusCode != 206 { - return nil, buildError(hresp) - } - if resp != nil { - err = xml.NewDecoder(hresp.Body).Decode(resp) - hresp.Body.Close() - - if debug { - log.Printf("goamz.s3> decoded xml into %#v", resp) - } - - } - return hresp, err -} - -// run sends req and returns the http response from the server. -// If resp is not nil, the XML data contained in the response -// body will be unmarshalled on it. -func (s3 *S3) run(req *request, resp interface{}) (*http.Response, error) { - if debug { - log.Printf("Running S3 request: %#v", req) - } - - hreq, err := s3.setupHttpRequest(req) - if err != nil { - return nil, err - } - - return s3.doHttpRequest(hreq, resp) -} - -// Error represents an error in an operation with S3. -type Error struct { - StatusCode int // HTTP status code (200, 403, ...) - Code string // EC2 error code ("UnsupportedOperation", ...) 
- Message string // The human-oriented error message - BucketName string - RequestId string - HostId string -} - -func (e *Error) Error() string { - return e.Message -} - -func buildError(r *http.Response) error { - if debug { - log.Printf("got error (status code %v)", r.StatusCode) - data, err := ioutil.ReadAll(r.Body) - if err != nil { - log.Printf("\tread error: %v", err) - } else { - log.Printf("\tdata:\n%s\n\n", data) - } - r.Body = ioutil.NopCloser(bytes.NewBuffer(data)) - } - - err := Error{} - // TODO return error if Unmarshal fails? - xml.NewDecoder(r.Body).Decode(&err) - r.Body.Close() - err.StatusCode = r.StatusCode - if err.Message == "" { - err.Message = r.Status - } - if debug { - log.Printf("err: %#v\n", err) - } - return &err -} - -func shouldRetry(err error) bool { - if err == nil { - return false - } - switch err { - case io.ErrUnexpectedEOF, io.EOF: - return true - } - switch e := err.(type) { - case *net.DNSError: - return true - case *net.OpError: - switch e.Op { - case "dial", "read", "write": - return true - } - case *url.Error: - // url.Error can be returned either by net/url if a URL cannot be - // parsed, or by net/http if the response is closed before the headers - // are received or parsed correctly. In that later case, e.Op is set to - // the HTTP method name with the first letter uppercased. We don't want - // to retry on POST operations, since those are not idempotent, all the - // other ones should be safe to retry. The only case where all - // operations are safe to retry are "dial" errors, since in that case - // the POST request didn't make it to the server. - - if netErr, ok := e.Err.(*net.OpError); ok && netErr.Op == "dial" { - return true - } - - switch e.Op { - case "Get", "Put", "Delete", "Head": - return shouldRetry(e.Err) - default: - return false - } - case *Error: - switch e.Code { - case "InternalError", "NoSuchUpload", "NoSuchBucket": - return true - } - switch e.StatusCode { - case 500, 503, 504: - return true - } - } - return false -} - -func hasCode(err error, code string) bool { - s3err, ok := err.(*Error) - return ok && s3err.Code == code -} - -func escapePath(s string) string { - return (&url.URL{Path: s}).String() -} diff --git a/vendor/github.com/docker/goamz/s3/sign.go b/vendor/github.com/docker/goamz/s3/sign.go deleted file mode 100644 index 6f24c667..00000000 --- a/vendor/github.com/docker/goamz/s3/sign.go +++ /dev/null @@ -1,120 +0,0 @@ -package s3 - -import ( - "crypto/hmac" - "crypto/sha1" - "encoding/base64" - "github.com/docker/goamz/aws" - "log" - "sort" - "strings" -) - -var b64 = base64.StdEncoding - -// ---------------------------------------------------------------------------- -// S3 signing (http://goo.gl/G1LrK) - -var s3ParamsToSign = map[string]bool{ - "acl": true, - "location": true, - "logging": true, - "notification": true, - "partNumber": true, - "policy": true, - "requestPayment": true, - "torrent": true, - "uploadId": true, - "uploads": true, - "versionId": true, - "versioning": true, - "versions": true, - "response-content-type": true, - "response-content-language": true, - "response-expires": true, - "response-cache-control": true, - "response-content-disposition": true, - "response-content-encoding": true, - "website": true, - "delete": true, -} - -func sign(auth aws.Auth, method, canonicalPath string, params, headers map[string][]string) { - var md5, ctype, date, xamz string - var xamzDate bool - var keys, sarray []string - xheaders := make(map[string]string) - for k, v := range headers { - k = 
strings.ToLower(k) - switch k { - case "content-md5": - md5 = v[0] - case "content-type": - ctype = v[0] - case "date": - if !xamzDate { - date = v[0] - } - default: - if strings.HasPrefix(k, "x-amz-") { - keys = append(keys, k) - xheaders[k] = strings.Join(v, ",") - if k == "x-amz-date" { - xamzDate = true - date = "" - } - } - } - } - if len(keys) > 0 { - sort.StringSlice(keys).Sort() - for i := range keys { - key := keys[i] - value := xheaders[key] - sarray = append(sarray, key+":"+value) - } - xamz = strings.Join(sarray, "\n") + "\n" - } - - expires := false - if v, ok := params["Expires"]; ok { - // Query string request authentication alternative. - expires = true - date = v[0] - params["AWSAccessKeyId"] = []string{auth.AccessKey} - } - - sarray = sarray[0:0] - for k, v := range params { - if s3ParamsToSign[k] { - for _, vi := range v { - if vi == "" { - sarray = append(sarray, k) - } else { - // "When signing you do not encode these values." - sarray = append(sarray, k+"="+vi) - } - } - } - } - if len(sarray) > 0 { - sort.StringSlice(sarray).Sort() - canonicalPath = canonicalPath + "?" + strings.Join(sarray, "&") - } - - payload := method + "\n" + md5 + "\n" + ctype + "\n" + date + "\n" + xamz + canonicalPath - hash := hmac.New(sha1.New, []byte(auth.SecretKey)) - hash.Write([]byte(payload)) - signature := make([]byte, b64.EncodedLen(hash.Size())) - b64.Encode(signature, hash.Sum(nil)) - - if expires { - params["Signature"] = []string{string(signature)} - } else { - headers["Authorization"] = []string{"AWS " + auth.AccessKey + ":" + string(signature)} - } - if debug { - log.Printf("Signature payload: %q", payload) - log.Printf("Signature: %q", signature) - } -} From d260b18f2fc44a69a0d1ab865e1d889f67edb911 Mon Sep 17 00:00:00 2001 From: Olivier Date: Fri, 10 Aug 2018 14:58:51 -0700 Subject: [PATCH 134/276] Remove ciphers that do not support perfect forward secrecy Signed-off-by: Olivier --- registry/registry.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/registry/registry.go b/registry/registry.go index 36aef47c..0737f7d0 100644 --- a/registry/registry.go +++ b/registry/registry.go @@ -139,8 +139,6 @@ func (registry *Registry) ListenAndServe() error { tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_RSA_WITH_AES_256_CBC_SHA, }, } From a64394ece5f69596602bcdf12ad578caf5ea89b8 Mon Sep 17 00:00:00 2001 From: Parth Mehrotra Date: Sat, 11 Aug 2018 12:33:06 -0400 Subject: [PATCH 135/276] Fixed customer facing grammar and spelling Indentation is spelled wrong as intendation everywhere the word occurs Can either be: > "found a tab character that violates indentation" or > "found tab characters that violate indentation" Signed-off-by: Parth Mehrotra --- vendor/gopkg.in/yaml.v2/scannerc.go | 36 ++++++++++++++--------------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go index fe93b190..0c86a82f 100644 --- a/vendor/gopkg.in/yaml.v2/scannerc.go +++ b/vendor/gopkg.in/yaml.v2/scannerc.go @@ -961,15 +961,15 @@ func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml } // Pop indentation levels from the indents stack until the current level -// becomes less or equal to the column. For each intendation level, append +// becomes less or equal to the column. For each indentation level, append // the BLOCK-END token. 
func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { // In the flow context, do nothing. - if parser.flow_level > 0 { + if parser.flow_level > 0 { return true } - // Loop through the intendation levels in the stack. + // Loop through the indentation levels in the stack. for parser.indent > column { // Create a token and append it to the queue. token := yaml_token_t{ @@ -2085,14 +2085,14 @@ func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, l return false } if is_digit(parser.buffer, parser.buffer_pos) { - // Check that the intendation is greater than 0. + // Check that the indentation is greater than 0. if parser.buffer[parser.buffer_pos] == '0' { yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an intendation indicator equal to 0") + start_mark, "found an indentation indicator equal to 0") return false } - // Get the intendation level and eat the indicator. + // Get the indentation level and eat the indicator. increment = as_digit(parser.buffer, parser.buffer_pos) skip(parser) } @@ -2102,7 +2102,7 @@ func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, l if parser.buffer[parser.buffer_pos] == '0' { yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an intendation indicator equal to 0") + start_mark, "found an indentation indicator equal to 0") return false } increment = as_digit(parser.buffer, parser.buffer_pos) @@ -2157,7 +2157,7 @@ func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, l end_mark := parser.mark - // Set the intendation level if it was specified. + // Set the indentation level if it was specified. var indent int if increment > 0 { if parser.indent >= 0 { @@ -2217,7 +2217,7 @@ func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, l leading_break = read_line(parser, leading_break) - // Eat the following intendation spaces and line breaks. + // Eat the following indentation spaces and line breaks. if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { return false } @@ -2245,15 +2245,15 @@ func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, l return true } -// Scan intendation spaces and line breaks for a block scalar. Determine the -// intendation level if needed. +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { *end_mark = parser.mark - // Eat the intendation spaces and line breaks. + // Eat the indentation spaces and line breaks. max_indent := 0 for { - // Eat the intendation spaces. + // Eat the indentation spaces. if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } @@ -2267,10 +2267,10 @@ func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, br max_indent = parser.mark.column } - // Check for a tab character messing the intendation. + // Check for a tab character messing the indentation. 
if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
 			return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
-				start_mark, "found a tab character where an intendation space is expected")
+				start_mark, "found a tab character where an indentation space is expected")
 		}
 
 		// Have we found a non-empty line?
@@ -2655,10 +2655,10 @@ func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) b
 	for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
 		if is_blank(parser.buffer, parser.buffer_pos) {
 
-			// Check for tab character that abuse intendation.
+			// Check for tab character that abuse indentation.
 			if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
 				yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
-					start_mark, "found a tab character that violate intendation")
+					start_mark, "found a tab character that violates indentation")
 				return false
 			}
 
@@ -2687,7 +2687,7 @@ func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) b
 		}
 	}
 
-	// Check intendation level.
+	// Check indentation level.
 	if parser.flow_level == 0 && parser.mark.column < indent {
 		break
 	}

From 2fdb2ac270b2185691acc23402d6f6db9875c30f Mon Sep 17 00:00:00 2001
From: Mike Brown
Date: Mon, 23 Jul 2018 17:03:17 -0500
Subject: [PATCH 136/276] adds validation testing for schema version values

Signed-off-by: Mike Brown

---
 registry/storage/manifestlisthandler.go    |  4 ++++
 registry/storage/manifeststore_test.go     | 15 ++++++++++++++-
 registry/storage/ocimanifesthandler.go     |  4 ++++
 registry/storage/schema2manifesthandler.go |  4 ++++
 4 files changed, 26 insertions(+), 1 deletion(-)

diff --git a/registry/storage/manifestlisthandler.go b/registry/storage/manifestlisthandler.go
index 085ccccc..43468f66 100644
--- a/registry/storage/manifestlisthandler.go
+++ b/registry/storage/manifestlisthandler.go
@@ -64,6 +64,10 @@ func (ms *manifestListHandler) Put(ctx context.Context, manifestList distributio
 func (ms *manifestListHandler) verifyManifest(ctx context.Context, mnfst manifestlist.DeserializedManifestList, skipDependencyVerification bool) error {
 	var errs distribution.ErrManifestVerification
 
+	if mnfst.SchemaVersion != 2 {
+		return fmt.Errorf("unrecognized manifest list schema version %d", mnfst.SchemaVersion)
+	}
+
 	if !skipDependencyVerification {
 		// This manifest service is different from the blob service
 		// returned by Blob.
It uses a linked blob store to ensure that diff --git a/registry/storage/manifeststore_test.go b/registry/storage/manifeststore_test.go index 7c9b9edf..6c6cf905 100644 --- a/registry/storage/manifeststore_test.go +++ b/registry/storage/manifeststore_test.go @@ -424,9 +424,22 @@ func testOCIManifestStorage(t *testing.T, testname string, includeMediaTypes boo t.Fatalf("%s: unexpected error generating manifest: %v", testname, err) } + // before putting the manifest test for proper handling of SchemaVersion + + if manifest.(*ocischema.DeserializedManifest).Manifest.SchemaVersion != 2 { + t.Fatalf("%s: unexpected error generating default version for oci manifest", testname) + } + manifest.(*ocischema.DeserializedManifest).Manifest.SchemaVersion = 0 + var manifestDigest digest.Digest if manifestDigest, err = ms.Put(ctx, manifest); err != nil { - t.Fatalf("%s: unexpected error putting manifest: %v", testname, err) + if err.Error() != "unrecognized manifest schema version 0" { + t.Fatalf("%s: unexpected error putting manifest: %v", testname, err) + } + manifest.(*ocischema.DeserializedManifest).Manifest.SchemaVersion = 2 + if manifestDigest, err = ms.Put(ctx, manifest); err != nil { + t.Fatalf("%s: unexpected error putting manifest: %v", testname, err) + } } // Also create an image index that contains the manifest diff --git a/registry/storage/ocimanifesthandler.go b/registry/storage/ocimanifesthandler.go index 57ff2829..03264b8c 100644 --- a/registry/storage/ocimanifesthandler.go +++ b/registry/storage/ocimanifesthandler.go @@ -66,6 +66,10 @@ func (ms *ocischemaManifestHandler) Put(ctx context.Context, manifest distributi func (ms *ocischemaManifestHandler) verifyManifest(ctx context.Context, mnfst ocischema.DeserializedManifest, skipDependencyVerification bool) error { var errs distribution.ErrManifestVerification + if mnfst.Manifest.SchemaVersion != 2 { + return fmt.Errorf("unrecognized manifest schema version %d", mnfst.Manifest.SchemaVersion) + } + if skipDependencyVerification { return nil } diff --git a/registry/storage/schema2manifesthandler.go b/registry/storage/schema2manifesthandler.go index b0ae01ae..863ab2ba 100644 --- a/registry/storage/schema2manifesthandler.go +++ b/registry/storage/schema2manifesthandler.go @@ -73,6 +73,10 @@ func (ms *schema2ManifestHandler) Put(ctx context.Context, manifest distribution func (ms *schema2ManifestHandler) verifyManifest(ctx context.Context, mnfst schema2.DeserializedManifest, skipDependencyVerification bool) error { var errs distribution.ErrManifestVerification + if mnfst.Manifest.SchemaVersion != 2 { + return fmt.Errorf("unrecognized manifest schema version %d", mnfst.Manifest.SchemaVersion) + } + if skipDependencyVerification { return nil } From 57212c909b6600b6a40a6289f7e775d757b6bdcf Mon Sep 17 00:00:00 2001 From: Ryan Abrams Date: Tue, 14 Aug 2018 12:13:40 -0700 Subject: [PATCH 137/276] Bump aws sdk to v1.15.11 This is the latest official release for this dependency Signed-off-by: Ryan Abrams --- vendor.conf | 2 +- vendor/github.com/aws/aws-sdk-go/README.md | 6 +- .../aws/aws-sdk-go/aws/client/client.go | 10 +- .../aws-sdk-go/aws/client/default_retryer.go | 28 +- .../aws/aws-sdk-go/aws/client/logger.go | 106 +- .../aws/client/metadata/client_info.go | 1 + .../github.com/aws/aws-sdk-go/aws/config.go | 24 +- .../aws-sdk-go/aws/corehandlers/handlers.go | 28 +- .../aws-sdk-go/aws/corehandlers/user_agent.go | 37 + .../aws-sdk-go/aws/credentials/credentials.go | 27 +- .../ec2rolecreds/ec2_role_provider.go | 6 +- 
.../github.com/aws/aws-sdk-go/aws/csm/doc.go | 46 + .../aws/aws-sdk-go/aws/csm/enable.go | 67 + .../aws/aws-sdk-go/aws/csm/metric.go | 51 + .../aws/aws-sdk-go/aws/csm/metric_chan.go | 54 + .../aws/aws-sdk-go/aws/csm/reporter.go | 231 +++ .../aws/aws-sdk-go/aws/defaults/defaults.go | 56 +- .../aws/aws-sdk-go/aws/ec2metadata/api.go | 8 +- .../aws/aws-sdk-go/aws/ec2metadata/service.go | 24 + .../aws/aws-sdk-go/aws/endpoints/decode.go | 22 + .../aws/aws-sdk-go/aws/endpoints/defaults.go | 827 +++++++- .../aws/aws-sdk-go/aws/endpoints/endpoints.go | 22 +- .../aws/aws-sdk-go/aws/endpoints/v3model.go | 12 +- .../github.com/aws/aws-sdk-go/aws/logger.go | 6 + .../aws/aws-sdk-go/aws/request/handlers.go | 18 + .../aws-sdk-go/aws/request/offset_reader.go | 4 +- .../aws/aws-sdk-go/aws/request/request.go | 62 +- .../aws/aws-sdk-go/aws/request/request_1_7.go | 2 +- .../aws/aws-sdk-go/aws/request/request_1_8.go | 2 +- .../aws/request/request_pagination.go | 40 +- .../aws/aws-sdk-go/aws/request/retryer.go | 2 +- .../aws/aws-sdk-go/aws/session/doc.go | 2 +- .../aws/aws-sdk-go/aws/session/env_config.go | 28 + .../aws/aws-sdk-go/aws/session/session.go | 64 +- .../aws/aws-sdk-go/aws/signer/v4/v4.go | 75 +- vendor/github.com/aws/aws-sdk-go/aws/types.go | 83 + .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../aws/aws-sdk-go/internal/sdkio/io_go1.6.go | 10 + .../aws/aws-sdk-go/internal/sdkio/io_go1.7.go | 12 + .../internal/sdkrand/locked_source.go | 29 + .../aws/aws-sdk-go/internal/sdkuri/path.go | 23 + .../private/protocol/eventstream/debug.go | 144 ++ .../private/protocol/eventstream/decode.go | 199 ++ .../private/protocol/eventstream/encode.go | 114 ++ .../private/protocol/eventstream/error.go | 23 + .../eventstream/eventstreamapi/api.go | 196 ++ .../eventstream/eventstreamapi/error.go | 24 + .../private/protocol/eventstream/header.go | 166 ++ .../protocol/eventstream/header_value.go | 501 +++++ .../private/protocol/eventstream/message.go | 103 + .../aws-sdk-go/private/protocol/payload.go | 81 + .../private/protocol/query/build.go | 2 +- .../protocol/query/queryutil/queryutil.go | 7 +- .../aws-sdk-go/private/protocol/rest/build.go | 12 +- .../private/protocol/rest/unmarshal.go | 6 +- .../aws-sdk-go/private/protocol/timestamp.go | 72 + .../private/protocol/xml/xmlutil/build.go | 30 +- .../private/protocol/xml/xmlutil/unmarshal.go | 20 +- .../protocol/xml/xmlutil/xml_to_struct.go | 1 + .../aws/aws-sdk-go/service/s3/api.go | 1728 ++++++++++++----- .../aws/aws-sdk-go/service/s3/body_hash.go | 249 +++ .../aws/aws-sdk-go/service/s3/content_md5.go | 36 - .../aws-sdk-go/service/s3/customizations.go | 6 + .../aws/aws-sdk-go/service/s3/service.go | 8 +- .../aws-sdk-go/service/s3/statusok_error.go | 3 +- .../aws/aws-sdk-go/service/sts/api.go | 198 +- .../aws/aws-sdk-go/service/sts/service.go | 6 +- 67 files changed, 5178 insertions(+), 916 deletions(-) create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go create mode 100644 
vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go
 delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go

diff --git a/vendor.conf b/vendor.conf
index d0efe39d..751f8848 100644
--- a/vendor.conf
+++ b/vendor.conf
@@ -1,7 +1,7 @@
 github.com/Azure/azure-sdk-for-go 4650843026a7fdec254a8d9cf893693a254edd0b
 github.com/Azure/go-autorest eaa7994b2278094c904d31993d26f56324db3052
 github.com/sirupsen/logrus 3d4380f53a34dcdc95f0c1db702615992b38d9a4
-github.com/aws/aws-sdk-go 5bcc0a238d880469f949fc7cd24e35f32ab80cbd
+github.com/aws/aws-sdk-go f831d5a0822a1ad72420ab18c6269bca1ddaf490
 github.com/bshuster-repo/logrus-logstash-hook d2c0ecc1836d91814e15e23bb5dc309c3ef51f4a
 github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
 github.com/bugsnag/bugsnag-go b1d153021fcd90ca3f080db36bec96dc690fb274

diff --git a/vendor/github.com/aws/aws-sdk-go/README.md b/vendor/github.com/aws/aws-sdk-go/README.md
index d8acfdd0..3200765e 100644
--- a/vendor/github.com/aws/aws-sdk-go/README.md
+++ b/vendor/github.com/aws/aws-sdk-go/README.md
@@ -6,6 +6,8 @@ aws-sdk-go is the official AWS SDK for the Go programming language.
 
 Checkout our [release notes](https://github.com/aws/aws-sdk-go/releases) for information about the latest bug fixes, updates, and features added to the SDK.
 
+We [announced](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-2-0-developer-preview/) the Developer Preview for the [v2 AWS SDK for Go](https://github.com/aws/aws-sdk-go-v2). The v2 SDK is available at https://github.com/aws/aws-sdk-go-v2 and can be added to your project's dependencies via `go get github.com/aws/aws-sdk-go-v2`. Check out the v2 SDK's [changes and updates](https://github.com/aws/aws-sdk-go-v2/blob/master/CHANGELOG.md), and let us know what you think. We want your feedback.
+
 ## Installing
 
 If you are using Go 1.5 with the `GO15VENDOREXPERIMENT=1` vendoring flag, or 1.6 and higher you can use the following command to retrieve the SDK. The SDK's non-testing dependencies will be included and are vendored in the `vendor` folder.
@@ -305,7 +307,7 @@ documentation for the errors that could be returned.
     // will leak connections.
defer result.Body.Close() - fmt.Println("Object Size:", aws.StringValue(result.ContentLength)) + fmt.Println("Object Size:", aws.Int64Value(result.ContentLength)) ``` ### API Request Pagination and Resource Waiters @@ -332,7 +334,7 @@ take a callback function that will be called for each page of the API's response ``` Waiter helper methods provide the functionality to wait for an AWS resource -state. These methods abstract the logic needed to to check the state of an +state. These methods abstract the logic needed to check the state of an AWS resource, and wait until that resource is in a desired state. The waiter will block until the resource is in the state that is desired, an error occurs, or the waiter times out. If a resource times out the error code returned will diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go index 788fe6e2..212fe25e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go @@ -15,6 +15,12 @@ type Config struct { Endpoint string SigningRegion string SigningName string + + // States that the signing name did not come from a modeled source but + // was derived based on other data. Used by service client constructors + // to determine if the signin name can be overriden based on metadata the + // service has. + SigningNameDerived bool } // ConfigProvider provides a generic way for a service client to receive @@ -85,6 +91,6 @@ func (c *Client) AddDebugHandlers() { return } - c.Handlers.Send.PushFrontNamed(request.NamedHandler{Name: "awssdk.client.LogRequest", Fn: logRequest}) - c.Handlers.Send.PushBackNamed(request.NamedHandler{Name: "awssdk.client.LogResponse", Fn: logResponse}) + c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler) + c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go index 63d2df67..a397b0d0 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go @@ -1,12 +1,11 @@ package client import ( - "math/rand" "strconv" - "sync" "time" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkrand" ) // DefaultRetryer implements basic retry logic using exponential backoff for @@ -31,8 +30,6 @@ func (d DefaultRetryer) MaxRetries() int { return d.NumMaxRetries } -var seededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())}) - // RetryRules returns the delay duration before retrying this request again func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration { // Set the upper limit of delay in retrying at ~five minutes @@ -53,7 +50,7 @@ func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration { retryCount = 13 } - delay := (1 << uint(retryCount)) * (seededRand.Intn(minTime) + minTime) + delay := (1 << uint(retryCount)) * (sdkrand.SeededRand.Intn(minTime) + minTime) return time.Duration(delay) * time.Millisecond } @@ -65,7 +62,7 @@ func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { return *r.Retryable } - if r.HTTPResponse.StatusCode >= 500 { + if r.HTTPResponse.StatusCode >= 500 && r.HTTPResponse.StatusCode != 501 { return true } return r.IsErrorRetryable() || d.shouldThrottle(r) @@ -117,22 +114,3 @@ func canUseRetryAfterHeader(r *request.Request) bool { return true } - -// lockedSource is a thread-safe 
implementation of rand.Source -type lockedSource struct { - lk sync.Mutex - src rand.Source -} - -func (r *lockedSource) Int63() (n int64) { - r.lk.Lock() - n = r.src.Int63() - r.lk.Unlock() - return -} - -func (r *lockedSource) Seed(seed int64) { - r.lk.Lock() - r.src.Seed(seed) - r.lk.Unlock() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go index 1f39c91f..ce9fb896 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go @@ -44,22 +44,57 @@ func (reader *teeReaderCloser) Close() error { return reader.Source.Close() } +// LogHTTPRequestHandler is a SDK request handler to log the HTTP request sent +// to a service. Will include the HTTP request body if the LogLevel of the +// request matches LogDebugWithHTTPBody. +var LogHTTPRequestHandler = request.NamedHandler{ + Name: "awssdk.client.LogRequest", + Fn: logRequest, +} + func logRequest(r *request.Request) { logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) - dumpedBody, err := httputil.DumpRequestOut(r.HTTPRequest, logBody) + bodySeekable := aws.IsReaderSeekable(r.Body) + + b, err := httputil.DumpRequestOut(r.HTTPRequest, logBody) if err != nil { - r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err)) + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) return } if logBody { + if !bodySeekable { + r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body)) + } // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's // Body as a NoOpCloser and will not be reset after read by the HTTP // client reader. r.ResetBody() } - r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody))) + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} + +// LogHTTPRequestHeaderHandler is a SDK request handler to log the HTTP request sent +// to a service. Will only log the HTTP request's headers. The request payload +// will not be read. +var LogHTTPRequestHeaderHandler = request.NamedHandler{ + Name: "awssdk.client.LogRequestHeader", + Fn: logRequestHeader, +} + +func logRequestHeader(r *request.Request) { + b, err := httputil.DumpRequestOut(r.HTTPRequest, false) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) } const logRespMsg = `DEBUG: Response %s/%s Details: @@ -72,27 +107,44 @@ const logRespErrMsg = `DEBUG ERROR: Response %s/%s: %s -----------------------------------------------------` +// LogHTTPResponseHandler is a SDK request handler to log the HTTP response +// received from a service. Will include the HTTP response body if the LogLevel +// of the request matches LogDebugWithHTTPBody. 
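// For example, a client built with
//	aws.NewConfig().WithLogLevel(aws.LogDebugWithHTTPBody)
// causes this handler to emit complete response bodies.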
+var LogHTTPResponseHandler = request.NamedHandler{ + Name: "awssdk.client.LogResponse", + Fn: logResponse, +} + func logResponse(r *request.Request) { lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)} - r.HTTPResponse.Body = &teeReaderCloser{ - Reader: io.TeeReader(r.HTTPResponse.Body, lw), - Source: r.HTTPResponse.Body, + + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + if logBody { + r.HTTPResponse.Body = &teeReaderCloser{ + Reader: io.TeeReader(r.HTTPResponse.Body, lw), + Source: r.HTTPResponse.Body, + } } handlerFn := func(req *request.Request) { - body, err := httputil.DumpResponse(req.HTTPResponse, false) + b, err := httputil.DumpResponse(req.HTTPResponse, false) if err != nil { - lw.Logger.Log(fmt.Sprintf(logRespErrMsg, req.ClientInfo.ServiceName, req.Operation.Name, err)) + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + req.ClientInfo.ServiceName, req.Operation.Name, err)) return } - b, err := ioutil.ReadAll(lw.buf) - if err != nil { - lw.Logger.Log(fmt.Sprintf(logRespErrMsg, req.ClientInfo.ServiceName, req.Operation.Name, err)) - return - } - lw.Logger.Log(fmt.Sprintf(logRespMsg, req.ClientInfo.ServiceName, req.Operation.Name, string(body))) - if req.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) { + lw.Logger.Log(fmt.Sprintf(logRespMsg, + req.ClientInfo.ServiceName, req.Operation.Name, string(b))) + + if logBody { + b, err := ioutil.ReadAll(lw.buf) + if err != nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + req.ClientInfo.ServiceName, req.Operation.Name, err)) + return + } + lw.Logger.Log(string(b)) } } @@ -106,3 +158,27 @@ func logResponse(r *request.Request) { Name: handlerName, Fn: handlerFn, }) } + +// LogHTTPResponseHeaderHandler is a SDK request handler to log the HTTP +// response received from a service. Will only log the HTTP response's headers. +// The response payload will not be read. +var LogHTTPResponseHeaderHandler = request.NamedHandler{ + Name: "awssdk.client.LogResponseHeader", + Fn: logResponseHeader, +} + +func logResponseHeader(r *request.Request) { + if r.Config.Logger == nil { + return + } + + b, err := httputil.DumpResponse(r.HTTPResponse, false) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + r.Config.Logger.Log(fmt.Sprintf(logRespMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go index 4778056d..920e9fdd 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go @@ -3,6 +3,7 @@ package metadata // ClientInfo wraps immutable data from the client.Client structure. type ClientInfo struct { ServiceName string + ServiceID string APIVersion string Endpoint string SigningName string diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go index ae3a2869..5421b5d4 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -151,6 +151,15 @@ type Config struct { // with accelerate. S3UseAccelerate *bool + // S3DisableContentMD5Validation config option is temporarily disabled, + // For S3 GetObject API calls, #1837. + // + // Set this to `true` to disable the S3 service client from automatically + // adding the ContentMD5 to S3 Object Put and Upload API calls. 
This option + // will also disable the SDK from performing object ContentMD5 validation + // on GetObject API calls. + S3DisableContentMD5Validation *bool + // Set this to `true` to disable the EC2Metadata client from overriding the // default http.Client's Timeout. This is helpful if you do not want the // EC2Metadata client to create a new http.Client. This options is only @@ -168,7 +177,7 @@ type Config struct { // EC2MetadataDisableTimeoutOverride *bool - // Instructs the endpiont to be generated for a service client to + // Instructs the endpoint to be generated for a service client to // be the dual stack endpoint. The dual stack endpoint will support // both IPv4 and IPv6 addressing. // @@ -336,6 +345,15 @@ func (c *Config) WithS3Disable100Continue(disable bool) *Config { func (c *Config) WithS3UseAccelerate(enable bool) *Config { c.S3UseAccelerate = &enable return c + +} + +// WithS3DisableContentMD5Validation sets a config +// S3DisableContentMD5Validation value returning a Config pointer for chaining. +func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config { + c.S3DisableContentMD5Validation = &enable + return c + } // WithUseDualStack sets a config UseDualStack value returning a Config @@ -435,6 +453,10 @@ func mergeInConfig(dst *Config, other *Config) { dst.S3UseAccelerate = other.S3UseAccelerate } + if other.S3DisableContentMD5Validation != nil { + dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation + } + if other.UseDualStack != nil { dst.UseDualStack = other.UseDualStack } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go index 495e3ef6..cfcddf3d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go @@ -3,12 +3,10 @@ package corehandlers import ( "bytes" "fmt" - "io" "io/ioutil" "net/http" "net/url" "regexp" - "runtime" "strconv" "time" @@ -36,18 +34,13 @@ var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLen if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { length, _ = strconv.ParseInt(slength, 10, 64) } else { - switch body := r.Body.(type) { - case nil: - length = 0 - case lener: - length = int64(body.Len()) - case io.Seeker: - r.BodyStart, _ = body.Seek(0, 1) - end, _ := body.Seek(0, 2) - body.Seek(r.BodyStart, 0) // make sure to seek back to original location - length = end - r.BodyStart - default: - panic("Cannot get length of body, must provide `ContentLength`") + if r.Body != nil { + var err error + length, err = aws.SeekerLen(r.Body) + if err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, "failed to get request body's length", err) + return + } } } @@ -60,13 +53,6 @@ var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLen } }} -// SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent. 
-var SDKVersionUserAgentHandler = request.NamedHandler{ - Name: "core.SDKVersionUserAgentHandler", - Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, - runtime.Version(), runtime.GOOS, runtime.GOARCH), -} - var reStatusCode = regexp.MustCompile(`^(\d{3})`) // ValidateReqSigHandler is a request handler to ensure that the request's diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go new file mode 100644 index 00000000..a15f496b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go @@ -0,0 +1,37 @@ +package corehandlers + +import ( + "os" + "runtime" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// SDKVersionUserAgentHandler is a request handler for adding the SDK Version +// to the user agent. +var SDKVersionUserAgentHandler = request.NamedHandler{ + Name: "core.SDKVersionUserAgentHandler", + Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, + runtime.Version(), runtime.GOOS, runtime.GOARCH), +} + +const execEnvVar = `AWS_EXECUTION_ENV` +const execEnvUAKey = `exec_env` + +// AddHostExecEnvUserAgentHander is a request handler appending the SDK's +// execution environment to the user agent. +// +// If the environment variable AWS_EXECUTION_ENV is set, its value will be +// appended to the user agent string. +var AddHostExecEnvUserAgentHander = request.NamedHandler{ + Name: "core.AddHostExecEnvUserAgentHander", + Fn: func(r *request.Request) { + v := os.Getenv(execEnvVar) + if len(v) == 0 { + return + } + + request.AddToUserAgent(r, execEnvUAKey+"/"+v) + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go index 42416fc2..a270844d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go @@ -158,13 +158,14 @@ func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { // IsExpired returns if the credentials are expired. func (e *Expiry) IsExpired() bool { - if e.CurrentTime == nil { - e.CurrentTime = time.Now + curTime := e.CurrentTime + if curTime == nil { + curTime = time.Now } - return e.expiration.Before(e.CurrentTime()) + return e.expiration.Before(curTime()) } -// A Credentials provides synchronous safe retrieval of AWS credentials Value. +// A Credentials provides concurrency safe retrieval of AWS credentials Value. // Credentials will cache the credentials value until they expire. Once the value // expires the next Get will attempt to retrieve valid credentials. // @@ -178,7 +179,8 @@ func (e *Expiry) IsExpired() bool { type Credentials struct { creds Value forceRefresh bool - m sync.Mutex + + m sync.RWMutex provider Provider } @@ -201,6 +203,17 @@ func NewCredentials(provider Provider) *Credentials { // If Credentials.Expire() was called the credentials Value will be force // expired, and the next call to Get() will cause them to be refreshed. func (c *Credentials) Get() (Value, error) { + // Check the cached credentials first with just the read lock. + c.m.RLock() + if !c.isExpired() { + creds := c.creds + c.m.RUnlock() + return creds, nil + } + c.m.RUnlock() + + // Credentials are expired need to retrieve the credentials taking the full + // lock. 
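// Another goroutine may have refreshed the credentials while this one was
// blocked on the write lock, so the expiry check is repeated once it is held.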
	c.m.Lock()
	defer c.m.Unlock()
@@ -234,8 +247,8 @@ func (c *Credentials) Expire() {
// If the Credentials were forced to be expired with Expire() this will
// reflect that override.
func (c *Credentials) IsExpired() bool {
-	c.m.Lock()
-	defer c.m.Unlock()
+	c.m.RLock()
+	defer c.m.RUnlock()

	return c.isExpired()
}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
index c3974952..0ed791be 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
@@ -4,7 +4,6 @@ import (
	"bufio"
	"encoding/json"
	"fmt"
-	"path"
	"strings"
	"time"

@@ -12,6 +11,7 @@ import (
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/ec2metadata"
+	"github.com/aws/aws-sdk-go/internal/sdkuri"
)

// ProviderName provides a name of EC2Role provider
@@ -125,7 +125,7 @@ type ec2RoleCredRespBody struct {
	Message string
}

-const iamSecurityCredsPath = "/iam/security-credentials"
+const iamSecurityCredsPath = "iam/security-credentials/"

// requestCredList requests a list of credentials from the EC2 service.
// If there are no credentials, or there is an error making or receiving the request
@@ -153,7 +153,7 @@ func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
// If the credentials cannot be found, or there is an error reading the response
// an error will be returned.
func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) {
-	resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName))
+	resp, err := client.GetMetadata(sdkuri.PathJoin(iamSecurityCredsPath, credsName))
	if err != nil {
		return ec2RoleCredRespBody{},
			awserr.New("EC2RoleRequestError",
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
new file mode 100644
index 00000000..152d785b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
@@ -0,0 +1,46 @@
+// Package csm provides Client Side Monitoring (CSM) which enables sending metrics
+// via a UDP connection. Using the Start function will enable the reporting of
+// metrics on a given port. If Start is called again with different parameters,
+// a panic will occur.
+//
+// Pause can be called to pause any metrics publishing on a given port. Sessions
+// that have had their handlers modified via InjectHandlers may still be used.
+// However, the handlers will act as a no-op, meaning no metrics will be published.
+//
+// Example:
+//	r, err := csm.Start("clientID", ":31000")
+//	if err != nil {
+//		panic(fmt.Errorf("failed starting CSM: %v", err))
+//	}
+//
+//	sess, err := session.NewSession(&aws.Config{})
+//	if err != nil {
+//		panic(fmt.Errorf("failed loading session: %v", err))
+//	}
+//
+//	r.InjectHandlers(&sess.Handlers)
+//
+//	client := s3.New(sess)
+//	resp, err := client.GetObject(&s3.GetObjectInput{
+//		Bucket: aws.String("bucket"),
+//		Key: aws.String("key"),
+//	})
+//
+//	// Will pause monitoring
+//	r.Pause()
+//	resp, err = client.GetObject(&s3.GetObjectInput{
+//		Bucket: aws.String("bucket"),
+//		Key: aws.String("key"),
+//	})
+//
+//	// Resume monitoring
+//	r.Continue()
+//
+// Start returns a Reporter that is used to enable or disable monitoring. If
+// access to the Reporter is required later, calling Get will return the Reporter
+// singleton.
+//
+// Example:
+//	r := csm.Get()
+//	r.Continue()
+package csm
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
new file mode 100644
index 00000000..2f0c6eac
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
@@ -0,0 +1,67 @@
+package csm
+
+import (
+	"fmt"
+	"sync"
+)
+
+var (
+	lock sync.Mutex
+)
+
+// Client side metric handler names
+const (
+	APICallMetricHandlerName        = "awscsm.SendAPICallMetric"
+	APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric"
+)
+
+// Start will start a long-running goroutine to capture
+// client side metrics. Calling Start multiple times will only
+// start the metric listener once and will panic if a different
+// client ID or port is passed in.
+//
+// Example:
+//	r, err := csm.Start("clientID", "127.0.0.1:8094")
+//	if err != nil {
+//		panic(fmt.Errorf("expected no error, but received %v", err))
+//	}
+//	sess := session.NewSession()
+//	r.InjectHandlers(sess.Handlers)
+//
+//	svc := s3.New(sess)
+//	out, err := svc.GetObject(&s3.GetObjectInput{
+//		Bucket: aws.String("bucket"),
+//		Key: aws.String("key"),
+//	})
func Start(clientID string, url string) (*Reporter, error) {
+	lock.Lock()
+	defer lock.Unlock()
+
+	if sender == nil {
+		sender = newReporter(clientID, url)
+	} else {
+		if sender.clientID != clientID {
+			panic(fmt.Errorf("inconsistent client IDs. %q was expected, but received %q", sender.clientID, clientID))
+		}
+
+		if sender.url != url {
+			panic(fmt.Errorf("inconsistent URLs. %q was expected, but received %q", sender.url, url))
+		}
+	}
+
+	if err := connect(url); err != nil {
+		sender = nil
+		return nil, err
+	}
+
+	return sender, nil
+}
+
+// Get will return a reporter if one exists; if one does not exist, nil will
+// be returned.
+func Get() *Reporter { + lock.Lock() + defer lock.Unlock() + + return sender +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go new file mode 100644 index 00000000..4b0d630e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go @@ -0,0 +1,51 @@ +package csm + +import ( + "strconv" + "time" +) + +type metricTime time.Time + +func (t metricTime) MarshalJSON() ([]byte, error) { + ns := time.Duration(time.Time(t).UnixNano()) + return []byte(strconv.FormatInt(int64(ns/time.Millisecond), 10)), nil +} + +type metric struct { + ClientID *string `json:"ClientId,omitempty"` + API *string `json:"Api,omitempty"` + Service *string `json:"Service,omitempty"` + Timestamp *metricTime `json:"Timestamp,omitempty"` + Type *string `json:"Type,omitempty"` + Version *int `json:"Version,omitempty"` + + AttemptCount *int `json:"AttemptCount,omitempty"` + Latency *int `json:"Latency,omitempty"` + + Fqdn *string `json:"Fqdn,omitempty"` + UserAgent *string `json:"UserAgent,omitempty"` + AttemptLatency *int `json:"AttemptLatency,omitempty"` + + SessionToken *string `json:"SessionToken,omitempty"` + Region *string `json:"Region,omitempty"` + AccessKey *string `json:"AccessKey,omitempty"` + HTTPStatusCode *int `json:"HttpStatusCode,omitempty"` + XAmzID2 *string `json:"XAmzId2,omitempty"` + XAmzRequestID *string `json:"XAmznRequestId,omitempty"` + + AWSException *string `json:"AwsException,omitempty"` + AWSExceptionMessage *string `json:"AwsExceptionMessage,omitempty"` + SDKException *string `json:"SdkException,omitempty"` + SDKExceptionMessage *string `json:"SdkExceptionMessage,omitempty"` + + DestinationIP *string `json:"DestinationIp,omitempty"` + ConnectionReused *int `json:"ConnectionReused,omitempty"` + + AcquireConnectionLatency *int `json:"AcquireConnectionLatency,omitempty"` + ConnectLatency *int `json:"ConnectLatency,omitempty"` + RequestLatency *int `json:"RequestLatency,omitempty"` + DNSLatency *int `json:"DnsLatency,omitempty"` + TCPLatency *int `json:"TcpLatency,omitempty"` + SSLLatency *int `json:"SslLatency,omitempty"` +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go new file mode 100644 index 00000000..514fc373 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go @@ -0,0 +1,54 @@ +package csm + +import ( + "sync/atomic" +) + +const ( + runningEnum = iota + pausedEnum +) + +var ( + // MetricsChannelSize of metrics to hold in the channel + MetricsChannelSize = 100 +) + +type metricChan struct { + ch chan metric + paused int64 +} + +func newMetricChan(size int) metricChan { + return metricChan{ + ch: make(chan metric, size), + } +} + +func (ch *metricChan) Pause() { + atomic.StoreInt64(&ch.paused, pausedEnum) +} + +func (ch *metricChan) Continue() { + atomic.StoreInt64(&ch.paused, runningEnum) +} + +func (ch *metricChan) IsPaused() bool { + v := atomic.LoadInt64(&ch.paused) + return v == pausedEnum +} + +// Push will push metrics to the metric channel if the channel +// is not paused +func (ch *metricChan) Push(m metric) bool { + if ch.IsPaused() { + return false + } + + select { + case ch.ch <- m: + return true + default: + return false + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go new file mode 100644 index 00000000..11082e5e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go @@ -0,0 +1,231 @@ +package csm + 
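The metricChan type above pairs an atomic pause flag with a select/default send, so publishing a metric never blocks a request path; when the buffered channel is full the metric is silently dropped. A tiny standalone illustration of the non-blocking send idiom, independent of the SDK:

```
package main

import "fmt"

func main() {
	ch := make(chan int, 1)

	// trySend never blocks: the default case fires when the buffer is full
	// and the value is dropped instead of queued.
	trySend := func(v int) bool {
		select {
		case ch <- v:
			return true
		default:
			return false
		}
	}

	fmt.Println(trySend(1)) // true: buffer has room
	fmt.Println(trySend(2)) // false: buffer full, value dropped
}
```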
+import ( + "encoding/json" + "net" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +const ( + // DefaultPort is used when no port is specified + DefaultPort = "31000" +) + +// Reporter will gather metrics of API requests made and +// send those metrics to the CSM endpoint. +type Reporter struct { + clientID string + url string + conn net.Conn + metricsCh metricChan + done chan struct{} +} + +var ( + sender *Reporter +) + +func connect(url string) error { + const network = "udp" + if err := sender.connect(network, url); err != nil { + return err + } + + if sender.done == nil { + sender.done = make(chan struct{}) + go sender.start() + } + + return nil +} + +func newReporter(clientID, url string) *Reporter { + return &Reporter{ + clientID: clientID, + url: url, + metricsCh: newMetricChan(MetricsChannelSize), + } +} + +func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) { + if rep == nil { + return + } + + now := time.Now() + creds, _ := r.Config.Credentials.Get() + + m := metric{ + ClientID: aws.String(rep.clientID), + API: aws.String(r.Operation.Name), + Service: aws.String(r.ClientInfo.ServiceID), + Timestamp: (*metricTime)(&now), + UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), + Region: r.Config.Region, + Type: aws.String("ApiCallAttempt"), + Version: aws.Int(1), + + XAmzRequestID: aws.String(r.RequestID), + + AttemptCount: aws.Int(r.RetryCount + 1), + AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))), + AccessKey: aws.String(creds.AccessKeyID), + } + + if r.HTTPResponse != nil { + m.HTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode) + } + + if r.Error != nil { + if awserr, ok := r.Error.(awserr.Error); ok { + setError(&m, awserr) + } + } + + rep.metricsCh.Push(m) +} + +func setError(m *metric, err awserr.Error) { + msg := err.Error() + code := err.Code() + + switch code { + case "RequestError", + "SerializationError", + request.CanceledErrorCode: + m.SDKException = &code + m.SDKExceptionMessage = &msg + default: + m.AWSException = &code + m.AWSExceptionMessage = &msg + } +} + +func (rep *Reporter) sendAPICallMetric(r *request.Request) { + if rep == nil { + return + } + + now := time.Now() + m := metric{ + ClientID: aws.String(rep.clientID), + API: aws.String(r.Operation.Name), + Service: aws.String(r.ClientInfo.ServiceID), + Timestamp: (*metricTime)(&now), + Type: aws.String("ApiCall"), + AttemptCount: aws.Int(r.RetryCount + 1), + Latency: aws.Int(int(time.Now().Sub(r.Time) / time.Millisecond)), + XAmzRequestID: aws.String(r.RequestID), + } + + // TODO: Probably want to figure something out for logging dropped + // metrics + rep.metricsCh.Push(m) +} + +func (rep *Reporter) connect(network, url string) error { + if rep.conn != nil { + rep.conn.Close() + } + + conn, err := net.Dial(network, url) + if err != nil { + return awserr.New("UDPError", "Could not connect", err) + } + + rep.conn = conn + + return nil +} + +func (rep *Reporter) close() { + if rep.done != nil { + close(rep.done) + } + + rep.metricsCh.Pause() +} + +func (rep *Reporter) start() { + defer func() { + rep.metricsCh.Pause() + }() + + for { + select { + case <-rep.done: + rep.done = nil + return + case m := <-rep.metricsCh.ch: + // TODO: What to do with this error? 
Probably should just log
+			b, err := json.Marshal(m)
+			if err != nil {
+				continue
+			}
+
+			rep.conn.Write(b)
+		}
+	}
+}
+
+// Pause will pause the metric channel, preventing any new metrics from
+// being added.
+func (rep *Reporter) Pause() {
+	lock.Lock()
+	defer lock.Unlock()
+
+	if rep == nil {
+		return
+	}
+
+	rep.close()
+}
+
+// Continue will reopen the metric channel and allow for monitoring
+// to be resumed.
+func (rep *Reporter) Continue() {
+	lock.Lock()
+	defer lock.Unlock()
+	if rep == nil {
+		return
+	}
+
+	if !rep.metricsCh.IsPaused() {
+		return
+	}
+
+	rep.metricsCh.Continue()
+}
+
+// InjectHandlers will enable client side metrics and inject the proper
+// handlers to handle how metrics are sent.
+//
+// Example:
+//	// Start must be called in order to inject the correct handlers
+//	r, err := csm.Start("clientID", "127.0.0.1:8094")
+//	if err != nil {
+//		panic(fmt.Errorf("expected no error, but received %v", err))
+//	}
+//
+//	sess := session.NewSession()
+//	r.InjectHandlers(&sess.Handlers)
+//
+//	// create a new service client with our client side metric session
+//	svc := s3.New(sess)
+func (rep *Reporter) InjectHandlers(handlers *request.Handlers) {
+	if rep == nil {
+		return
+	}
+
+	apiCallHandler := request.NamedHandler{Name: APICallMetricHandlerName, Fn: rep.sendAPICallMetric}
+	apiCallAttemptHandler := request.NamedHandler{Name: APICallAttemptMetricHandlerName, Fn: rep.sendAPICallAttemptMetric}
+
+	handlers.Complete.PushFrontNamed(apiCallHandler)
+	handlers.Complete.PushFrontNamed(apiCallAttemptHandler)
+
+	handlers.AfterRetry.PushFrontNamed(apiCallAttemptHandler)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
index 07afe3b8..5040a2f6 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
@@ -9,6 +9,7 @@ package defaults

import (
	"fmt"
+	"net"
	"net/http"
	"net/url"
	"os"
@@ -72,6 +73,7 @@ func Handlers() request.Handlers {
	handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
	handlers.Validate.AfterEachFn = request.HandlerListStopOnError
	handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
+	handlers.Build.PushBackNamed(corehandlers.AddHostExecEnvUserAgentHander)
	handlers.Build.AfterEachFn = request.HandlerListStopOnError
	handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
	handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler)
@@ -90,14 +92,25 @@ func Handlers() request.Handlers {
func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
	return credentials.NewCredentials(&credentials.ChainProvider{
		VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
-		Providers: []credentials.Provider{
-			&credentials.EnvProvider{},
-			&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
-			RemoteCredProvider(*cfg, handlers),
-		},
+		Providers:     CredProviders(cfg, handlers),
	})
}

+// CredProviders returns the slice of providers used in
+// the default credential chain.
+//
+// This is useful for applications that need to use some other provider (for
+// example, use different environment variables for legacy reasons) but still
+// fall back on the default chain of providers.
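The customization this doc comment describes can look like the following hedged sketch: prepend an application-specific provider while keeping the default chain as the fallback. The prepended EnvProvider stands in for a hypothetical legacy-variable provider; CredProviders and ChainProvider come from the surrounding diff:

```
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/defaults"
)

func main() {
	cfg := defaults.Config()
	handlers := defaults.Handlers()

	// A custom provider would go first; the default chain (env, shared
	// credentials file, remote) remains the fallback via CredProviders.
	providers := append(
		[]credentials.Provider{&credentials.EnvProvider{}}, // stand-in for a custom provider
		defaults.CredProviders(cfg, handlers)...,
	)

	cfg.Credentials = credentials.NewCredentials(&credentials.ChainProvider{
		VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
		Providers:     providers,
	})
}
```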
+// This allows the default chain to be automatically updated.
+func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Provider {
+	return []credentials.Provider{
+		&credentials.EnvProvider{},
+		&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
+		RemoteCredProvider(*cfg, handlers),
+	}
+}
+
const (
	httpProviderEnvVar     = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
	ecsCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
@@ -118,14 +131,43 @@ func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.P
	return ec2RoleProvider(cfg, handlers)
}

+var lookupHostFn = net.LookupHost
+
+func isLoopbackHost(host string) (bool, error) {
+	ip := net.ParseIP(host)
+	if ip != nil {
+		return ip.IsLoopback(), nil
+	}
+
+	// Host is not an ip, perform lookup
+	addrs, err := lookupHostFn(host)
+	if err != nil {
+		return false, err
+	}
+	for _, addr := range addrs {
+		if !net.ParseIP(addr).IsLoopback() {
+			return false, nil
+		}
+	}
+
+	return true, nil
+}
+
func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
	var errMsg string

	parsed, err := url.Parse(u)
	if err != nil {
		errMsg = fmt.Sprintf("invalid URL, %v", err)
-	} else if host := aws.URLHostname(parsed); !(host == "localhost" || host == "127.0.0.1") {
-		errMsg = fmt.Sprintf("invalid host address, %q, only localhost and 127.0.0.1 are valid.", host)
+	} else {
+		host := aws.URLHostname(parsed)
+		if len(host) == 0 {
+			errMsg = "unable to parse host from local HTTP cred provider URL"
+		} else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil {
+			errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, loopbackErr)
+		} else if !isLoopback {
+			errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback hosts are allowed.", host)
+		}
	}

	if len(errMsg) > 0 {
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
index 984407a5..c215cd3f 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
@@ -4,12 +4,12 @@ import (
	"encoding/json"
	"fmt"
	"net/http"
-	"path"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/internal/sdkuri"
)

// GetMetadata uses the path provided to request information from the EC2
@@ -19,7 +19,7 @@ func (c *EC2Metadata) GetMetadata(p string) (string, error) {
	op := &request.Operation{
		Name:       "GetMetadata",
		HTTPMethod: "GET",
-		HTTPPath:   path.Join("/", "meta-data", p),
+		HTTPPath:   sdkuri.PathJoin("/meta-data", p),
	}

	output := &metadataOutput{}
@@ -35,7 +35,7 @@ func (c *EC2Metadata) GetUserData() (string, error) {
	op := &request.Operation{
		Name:       "GetUserData",
		HTTPMethod: "GET",
-		HTTPPath:   path.Join("/", "user-data"),
+		HTTPPath:   "/user-data",
	}

	output := &metadataOutput{}
@@ -56,7 +56,7 @@ func (c *EC2Metadata) GetDynamicData(p string) (string, error) {
	op := &request.Operation{
		Name:       "GetDynamicData",
		HTTPMethod: "GET",
-		HTTPPath:   path.Join("/", "dynamic", p),
+		HTTPPath:   sdkuri.PathJoin("/dynamic", p),
	}

	output := &metadataOutput{}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
index 5b4379db..ef5f7329 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
@@ -1,5 +1,10 @@
// Package ec2metadata provides the client for making API calls to the
// EC2 Metadata service.
+//
+// This package's client can be disabled completely by setting the environment
+// variable "AWS_EC2_METADATA_DISABLED=true". Setting the environment variable
+// to true instructs the SDK to disable the EC2 Metadata client. The client
+// cannot be used while the environment variable is set to true (case insensitive).
package ec2metadata

import (
@@ -7,17 +12,21 @@
	"errors"
	"io"
	"net/http"
+	"os"
+	"strings"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/corehandlers"
	"github.com/aws/aws-sdk-go/aws/request"
)

// ServiceName is the name of the service.
const ServiceName = "ec2metadata"
+const disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED"

// A EC2Metadata is an EC2 Metadata service Client.
type EC2Metadata struct {
@@ -75,6 +84,21 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
	svc.Handlers.Validate.Clear()
	svc.Handlers.Validate.PushBack(validateEndpointHandler)

+	// Disable the EC2 Metadata service if the environment variable is set.
+	// This short-circuits the service's functionality to always fail to send
+	// requests.
+	if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" {
+		svc.Handlers.Send.SwapNamed(request.NamedHandler{
+			Name: corehandlers.SendHandler.Name,
+			Fn: func(r *request.Request) {
+				r.Error = awserr.New(
+					request.CanceledErrorCode,
+					"EC2 IMDS access disabled via "+disableServiceEnvVar+" env var",
+					nil)
+			},
+		})
+	}
+
	// Add additional options to the service config
	for _, option := range opts {
		option(svc.Client)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
index 74f72de0..c04ba06c 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
@@ -84,6 +84,7 @@ func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resol
		custAddEC2Metadata(p)
		custAddS3DualStack(p)
		custRmIotDataService(p)
+		custFixAppAutoscalingChina(p)
	}

	return ps, nil
@@ -122,6 +123,27 @@ func custRmIotDataService(p *partition) {
	delete(p.Services, "data.iot")
}

+func custFixAppAutoscalingChina(p *partition) {
+	if p.ID != "aws-cn" {
+		return
+	}
+
+	const serviceName = "application-autoscaling"
+	s, ok := p.Services[serviceName]
+	if !ok {
+		return
+	}
+
+	const expectHostname = `autoscaling.{region}.amazonaws.com`
+	if e, a := s.Defaults.Hostname, expectHostname; e != a {
+		fmt.Printf("custFixAppAutoscalingChina: ignoring customization, expected %s, got %s\n", a, e)
+		return
+	}
+
+	s.Defaults.Hostname = expectHostname + ".cn"
+	p.Services[serviceName] = s
+}
+
type decodeModelError struct {
	awsError
}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
index fff35fc5..828bb3a4 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
@@ -24,6 +24,7 @@ const (
	EuCentral1RegionID = "eu-central-1" // EU (Frankfurt).
	EuWest1RegionID    = "eu-west-1"    // EU (Ireland).
	EuWest2RegionID    = "eu-west-2"    // EU (London).
+	EuWest3RegionID    = "eu-west-3"    // EU (Paris).
	SaEast1RegionID    = "sa-east-1"    // South America (Sao Paulo).
	UsEast1RegionID    = "us-east-1"    // US East (N. Virginia).
	UsEast2RegionID    = "us-east-2"    // US East (Ohio).
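With the AWS_EC2_METADATA_DISABLED switch above, the client still constructs normally, but every request fails with a canceled-request error from the swapped Send handler. A short sketch probing that behavior, assuming the SDK version in this diff:

```
package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// The variable is read when the client is constructed, so set it first.
	// With it set, the swapped Send handler fails every request instead of
	// contacting the metadata endpoint.
	os.Setenv("AWS_EC2_METADATA_DISABLED", "true")

	sess := session.Must(session.NewSession())
	svc := ec2metadata.New(sess)

	if _, err := svc.GetMetadata("instance-id"); err != nil {
		fmt.Println("metadata disabled:", err)
	}
}
```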
@@ -33,7 +34,8 @@ const ( // AWS China partition's regions. const ( - CnNorth1RegionID = "cn-north-1" // China (Beijing). + CnNorth1RegionID = "cn-north-1" // China (Beijing). + CnNorthwest1RegionID = "cn-northwest-1" // China (Ningxia). ) // AWS GovCloud (US) partition's regions. @@ -43,15 +45,21 @@ const ( // Service identifiers const ( + A4bServiceID = "a4b" // A4b. AcmServiceID = "acm" // Acm. + AcmPcaServiceID = "acm-pca" // AcmPca. + ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor. ApiPricingServiceID = "api.pricing" // ApiPricing. ApigatewayServiceID = "apigateway" // Apigateway. ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling. Appstream2ServiceID = "appstream2" // Appstream2. AthenaServiceID = "athena" // Athena. AutoscalingServiceID = "autoscaling" // Autoscaling. + AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans. BatchServiceID = "batch" // Batch. BudgetsServiceID = "budgets" // Budgets. + CeServiceID = "ce" // Ce. + Cloud9ServiceID = "cloud9" // Cloud9. ClouddirectoryServiceID = "clouddirectory" // Clouddirectory. CloudformationServiceID = "cloudformation" // Cloudformation. CloudfrontServiceID = "cloudfront" // Cloudfront. @@ -67,6 +75,7 @@ const ( CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity. CognitoIdpServiceID = "cognito-idp" // CognitoIdp. CognitoSyncServiceID = "cognito-sync" // CognitoSync. + ComprehendServiceID = "comprehend" // Comprehend. ConfigServiceID = "config" // Config. CurServiceID = "cur" // Cur. DatapipelineServiceID = "datapipeline" // Datapipeline. @@ -92,10 +101,12 @@ const ( EsServiceID = "es" // Es. EventsServiceID = "events" // Events. FirehoseServiceID = "firehose" // Firehose. + FmsServiceID = "fms" // Fms. GameliftServiceID = "gamelift" // Gamelift. GlacierServiceID = "glacier" // Glacier. GlueServiceID = "glue" // Glue. GreengrassServiceID = "greengrass" // Greengrass. + GuarddutyServiceID = "guardduty" // Guardduty. HealthServiceID = "health" // Health. IamServiceID = "iam" // Iam. ImportexportServiceID = "importexport" // Importexport. @@ -103,18 +114,24 @@ const ( IotServiceID = "iot" // Iot. KinesisServiceID = "kinesis" // Kinesis. KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics. + KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo. KmsServiceID = "kms" // Kms. LambdaServiceID = "lambda" // Lambda. LightsailServiceID = "lightsail" // Lightsail. LogsServiceID = "logs" // Logs. MachinelearningServiceID = "machinelearning" // Machinelearning. MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics. + MediaconvertServiceID = "mediaconvert" // Mediaconvert. + MedialiveServiceID = "medialive" // Medialive. + MediapackageServiceID = "mediapackage" // Mediapackage. + MediastoreServiceID = "mediastore" // Mediastore. MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace. MghServiceID = "mgh" // Mgh. MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics. ModelsLexServiceID = "models.lex" // ModelsLex. MonitoringServiceID = "monitoring" // Monitoring. MturkRequesterServiceID = "mturk-requester" // MturkRequester. + NeptuneServiceID = "neptune" // Neptune. OpsworksServiceID = "opsworks" // Opsworks. OpsworksCmServiceID = "opsworks-cm" // OpsworksCm. OrganizationsServiceID = "organizations" // Organizations. @@ -123,12 +140,18 @@ const ( RdsServiceID = "rds" // Rds. RedshiftServiceID = "redshift" // Redshift. RekognitionServiceID = "rekognition" // Rekognition. 
+ ResourceGroupsServiceID = "resource-groups" // ResourceGroups. Route53ServiceID = "route53" // Route53. Route53domainsServiceID = "route53domains" // Route53domains. RuntimeLexServiceID = "runtime.lex" // RuntimeLex. + RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker. S3ServiceID = "s3" // S3. + SagemakerServiceID = "sagemaker" // Sagemaker. SdbServiceID = "sdb" // Sdb. + SecretsmanagerServiceID = "secretsmanager" // Secretsmanager. + ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo. ServicecatalogServiceID = "servicecatalog" // Servicecatalog. + ServicediscoveryServiceID = "servicediscovery" // Servicediscovery. ShieldServiceID = "shield" // Shield. SmsServiceID = "sms" // Sms. SnowballServiceID = "snowball" // Snowball. @@ -142,9 +165,11 @@ const ( SupportServiceID = "support" // Support. SwfServiceID = "swf" // Swf. TaggingServiceID = "tagging" // Tagging. + TranslateServiceID = "translate" // Translate. WafServiceID = "waf" // Waf. WafRegionalServiceID = "waf-regional" // WafRegional. WorkdocsServiceID = "workdocs" // Workdocs. + WorkmailServiceID = "workmail" // Workmail. WorkspacesServiceID = "workspaces" // Workspaces. XrayServiceID = "xray" // Xray. ) @@ -222,6 +247,9 @@ var awsPartition = partition{ "eu-west-2": region{ Description: "EU (London)", }, + "eu-west-3": region{ + Description: "EU (Paris)", + }, "sa-east-1": region{ Description: "South America (Sao Paulo)", }, @@ -239,6 +267,12 @@ var awsPartition = partition{ }, }, Services: services{ + "a4b": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, "acm": service{ Endpoints: endpoints{ @@ -251,6 +285,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -258,6 +293,33 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "acm-pca": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "api.mediatailor": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, "api.pricing": service{ Defaults: endpoint{ CredentialScope: credentialScope{ @@ -281,6 +343,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -306,6 +369,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -331,10 +395,13 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, @@ -354,6 +421,7 @@ var awsPartition = partition{ "eu-central-1": 
endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -361,17 +429,38 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "autoscaling-plans": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "autoscaling-plans", + }, + }, + Endpoints: endpoints{ + "ap-southeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "batch": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -388,11 +477,35 @@ var awsPartition = partition{ }, }, }, + "ce": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "ce.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "cloud9": service{ + + Endpoints: endpoints{ + "ap-southeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "clouddirectory": service{ Endpoints: endpoints{ "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "us-east-1": endpoint{}, @@ -412,6 +525,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -449,7 +563,11 @@ var awsPartition = partition{ }, }, "cloudhsmv2": service{ - + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "cloudhsm", + }, + }, Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-south-1": endpoint{}, @@ -458,6 +576,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -491,6 +610,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -502,16 +622,44 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "codebuild-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "codebuild-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "codebuild-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "codebuild-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, "codecommit": service{ @@ -526,6 +674,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -545,6 +694,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -564,6 +714,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -574,8 +725,11 @@ var awsPartition = partition{ "codestar": service{ Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, @@ -633,6 +787,17 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "comprehend": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "config": service{ Endpoints: endpoints{ @@ -645,6 +810,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -673,9 +839,12 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, "eu-west-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, @@ -698,6 +867,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -723,6 +893,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -735,6 +906,7 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, @@ -762,6 +934,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "local": endpoint{ Hostname: "localhost:8000", Protocols: []string{"http"}, @@ -790,6 +963,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -813,12 +987,15 @@ var 
awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -830,12 +1007,15 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -854,11 +1034,18 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips": endpoint{ + Hostname: "elasticache-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "elasticbeanstalk": service{ @@ -873,6 +1060,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -883,11 +1071,14 @@ var awsPartition = partition{ "elasticfilesystem": service{ Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -905,6 +1096,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -929,6 +1121,7 @@ var awsPartition = partition{ }, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{ SSLCommonName: "{service}.{region}.{dnsSuffix}", @@ -981,6 +1174,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1000,6 +1194,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1011,13 +1206,32 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, + "fms": service{ + Defaults: endpoint{ + 
Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "gamelift": service{ Endpoints: endpoints{ @@ -1051,6 +1265,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -1060,9 +1275,17 @@ var awsPartition = partition{ "glue": service{ Endpoints: endpoints{ - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "greengrass": service{ @@ -1074,10 +1297,34 @@ var awsPartition = partition{ "ap-northeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, "us-east-1": endpoint{}, "us-west-2": endpoint{}, }, }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "health": service{ Endpoints: endpoints{ @@ -1119,8 +1366,10 @@ var awsPartition = partition{ "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, @@ -1134,6 +1383,7 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, @@ -1156,6 +1406,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1166,9 +1417,20 @@ var awsPartition = partition{ "kinesisanalytics": service{ Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesisvideo": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "kms": service{ @@ -1183,6 +1445,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1202,6 +1465,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": 
endpoint{}, @@ -1213,12 +1477,15 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, @@ -1236,6 +1503,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1256,6 +1524,66 @@ var awsPartition = partition{ "us-east-1": endpoint{}, }, }, + "mediaconvert": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "medialive": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mediapackage": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mediastore": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "metering.marketplace": service{ Defaults: endpoint{ CredentialScope: credentialScope{ @@ -1272,6 +1600,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1298,7 +1627,9 @@ var awsPartition = partition{ }, }, Endpoints: endpoints{ + "eu-west-1": endpoint{}, "us-east-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "monitoring": service{ @@ -1315,6 +1646,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1332,6 +1664,35 @@ var awsPartition = partition{ "us-east-1": endpoint{}, }, }, + "neptune": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{ + Hostname: "rds.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "rds.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "rds.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "rds.us-west-2.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, "opsworks": service{ Endpoints: endpoints{ @@ -1340,9 +1701,11 @@ var awsPartition = partition{ "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1353,9 +1716,15 @@ var awsPartition = partition{ "opsworks-cm": service{ Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "organizations": service{ @@ -1393,6 +1762,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1412,6 +1782,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{ SSLCommonName: "{service}.{dnsSuffix}", @@ -1433,6 +1804,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1443,9 +1815,32 @@ var awsPartition = partition{ "rekognition": service{ Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "resource-groups": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "route53": service{ @@ -1476,6 +1871,20 @@ var awsPartition = partition{ Endpoints: endpoints{ "eu-west-1": endpoint{}, "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "runtime.sagemaker": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "s3": service{ @@ -1510,6 +1919,7 @@ var awsPartition = partition{ SignatureVersions: []string{"s3", "s3v4"}, }, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "s3-external-1": endpoint{ Hostname: "s3-external-1.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, @@ -1536,6 +1946,19 @@ var awsPartition = partition{ }, }, }, + "sagemaker": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": 
endpoint{}, + "us-west-2": endpoint{}, + }, + }, "sdb": service{ Defaults: endpoint{ Protocols: []string{"http", "https"}, @@ -1554,7 +1977,7 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "servicecatalog": service{ + "secretsmanager": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, @@ -1573,6 +1996,85 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "serverlessrepo": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-northeast-2": endpoint{ + Protocols: []string{"https"}, + }, + "ap-south-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-southeast-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-southeast-2": endpoint{ + Protocols: []string{"https"}, + }, + "ca-central-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-central-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-west-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-west-2": endpoint{ + Protocols: []string{"https"}, + }, + "sa-east-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-east-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-east-2": endpoint{ + Protocols: []string{"https"}, + }, + "us-west-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-west-2": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "servicecatalog": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "servicediscovery": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "shield": service{ IsRegionalized: boxedFalse, Defaults: endpoint{ @@ -1589,11 +2091,17 @@ var awsPartition = partition{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -1602,10 +2110,13 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1627,6 +2138,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1649,7 +2161,32 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: 
"sqs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "sqs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "sqs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "sqs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, "us-east-1": endpoint{ SSLCommonName: "queue.{dnsSuffix}", }, @@ -1670,6 +2207,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1681,12 +2219,17 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -1702,6 +2245,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1726,6 +2270,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "local": endpoint{ Hostname: "localhost:8000", Protocols: []string{"http"}, @@ -1764,6 +2309,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ @@ -1813,6 +2359,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1832,6 +2379,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1839,6 +2387,17 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "translate": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "waf": service{ PartitionEndpoint: "aws-global", IsRegionalized: boxedFalse, @@ -1856,8 +2415,11 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, @@ -1873,15 +2435,28 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "workmail": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "workspaces": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, 
"ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-west-2": endpoint{}, }, @@ -1932,24 +2507,29 @@ var awscnPartition = partition{ "cn-north-1": region{ Description: "China (Beijing)", }, + "cn-northwest-1": region{ + Description: "China (Ningxia)", + }, }, Services: services{ "apigateway": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "application-autoscaling": service{ Defaults: endpoint{ - Hostname: "autoscaling.{region}.amazonaws.com", + Hostname: "autoscaling.{region}.amazonaws.com.cn", Protocols: []string{"http", "https"}, CredentialScope: credentialScope{ Service: "application-autoscaling", }, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "autoscaling": service{ @@ -1957,25 +2537,29 @@ var awscnPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "cloudformation": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "cloudtrail": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "codedeploy": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "cognito-identity": service{ @@ -1987,13 +2571,22 @@ var awscnPartition = partition{ "config": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "directconnect": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "dynamodb": service{ @@ -2001,7 +2594,8 @@ var awscnPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "ec2": service{ @@ -2009,7 +2603,8 @@ var awscnPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "ec2metadata": service{ @@ -2026,25 +2621,29 @@ var awscnPartition = partition{ "ecr": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "ecs": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "elasticache": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "elasticbeanstalk": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "elasticloadbalancing": service{ @@ -2052,7 +2651,8 @@ var awscnPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "elasticmapreduce": service{ @@ -2060,14 +2660,21 @@ var awscnPartition = partition{ Protocols: []string{"http", 
"https"}, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, }, }, - "es": service{}, "events": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "glacier": service{ @@ -2075,7 +2682,8 @@ var awscnPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "iam": service{ @@ -2104,19 +2712,22 @@ var awscnPartition = partition{ "kinesis": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "lambda": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "logs": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "monitoring": service{ @@ -2124,19 +2735,22 @@ var awscnPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "rds": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "redshift": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "s3": service{ @@ -2145,7 +2759,15 @@ var awscnPartition = partition{ SignatureVersions: []string{"s3v4"}, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "sms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "snowball": service{ @@ -2159,7 +2781,8 @@ var awscnPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "sqs": service{ @@ -2168,13 +2791,15 @@ var awscnPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "ssm": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "storagegateway": service{ @@ -2191,25 +2816,29 @@ var awscnPartition = partition{ }, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "sts": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "swf": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "tagging": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, }, @@ -2273,6 +2902,16 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "cloudhsmv2": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "cloudhsm", + }, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "cloudtrail": service{ Endpoints: endpoints{ @@ -2332,9 +2971,27 @@ var awsusgovPartition = partition{ }, }, }, + "ecr": service{ + + Endpoints: endpoints{ + 
"us-gov-west-1": endpoint{}, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "elasticache": service{ Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "elasticache-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-west-1": endpoint{}, }, }, @@ -2360,6 +3017,12 @@ var awsusgovPartition = partition{ }, }, }, + "es": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "events": service{ Endpoints: endpoints{ @@ -2387,6 +3050,12 @@ var awsusgovPartition = partition{ }, }, }, + "inspector": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "kinesis": service{ Endpoints: endpoints{ @@ -2411,12 +3080,28 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "metering.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "monitoring": service{ Endpoints: endpoints{ "us-gov-west-1": endpoint{}, }, }, + "polly": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "rds": service{ Endpoints: endpoints{ @@ -2487,6 +3172,18 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "states": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "streams.dynamodb": service{ Defaults: endpoint{ CredentialScope: credentialScope{ @@ -2515,5 +3212,19 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "tagging": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "translate": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, }, } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go index 9c3eedb4..e29c0951 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go @@ -206,10 +206,11 @@ func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) ( // enumerating over the regions in a partition. func (p Partition) Regions() map[string]Region { rs := map[string]Region{} - for id := range p.p.Regions { + for id, r := range p.p.Regions { rs[id] = Region{ - id: id, - p: p.p, + id: id, + desc: r.Description, + p: p.p, } } @@ -240,6 +241,10 @@ type Region struct { // ID returns the region's identifier. func (r Region) ID() string { return r.id } +// Description returns the region's description. The region description +// is free text, it can be empty, and it may change between SDK releases. +func (r Region) Description() string { return r.desc } + // ResolveEndpoint resolves an endpoint from the context of the region given // a service. See Partition.EndpointFor for usage and errors that can be returned. 
func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) { @@ -284,10 +289,11 @@ func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (Resolve func (s Service) Regions() map[string]Region { rs := map[string]Region{} for id := range s.p.Services[s.id].Endpoints { - if _, ok := s.p.Regions[id]; ok { + if r, ok := s.p.Regions[id]; ok { rs[id] = Region{ - id: id, - p: s.p, + id: id, + desc: r.Description, + p: s.p, } } } @@ -347,6 +353,10 @@ type ResolvedEndpoint struct { // The service name that should be used for signing requests. SigningName string + // States that the signing name for this endpoint was derived from metadata + // passed in, but was not explicitly modeled. + SigningNameDerived bool + // The signing method that should be used for signing requests. SigningMethod string }
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go index 13d968a2..ff6f76db 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go @@ -226,16 +226,20 @@ func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, op if len(signingRegion) == 0 { signingRegion = region } + signingName := e.CredentialScope.Service + var signingNameDerived bool if len(signingName) == 0 { signingName = service + signingNameDerived = true } return ResolvedEndpoint{ - URL: u, - SigningRegion: signingRegion, - SigningName: signingName, - SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), + URL: u, + SigningRegion: signingRegion, + SigningName: signingName, + SigningNameDerived: signingNameDerived, + SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), } }
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go index 3babb5ab..6ed15b2e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/logger.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/logger.go @@ -71,6 +71,12 @@ const ( // LogDebugWithRequestErrors states the SDK should log when service requests fail // to build, send, validate, or unmarshal. LogDebugWithRequestErrors + + // LogDebugWithEventStreamBody states the SDK should log EventStream + // request and response bodies. This should be used to log the EventStream + // wire unmarshaled message content of requests and responses made while + // using the SDK. Will also enable LogDebug. + LogDebugWithEventStreamBody ) // A Logger is a minimalistic interface for the SDK to log messages to. Should
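The endpoints changes above expose two new pieces of metadata: a Region's free-text Description and a SigningNameDerived flag on resolved endpoints. A minimal sketch (not part of the patch) of how the Description accessor might be used, assuming only this vendored endpoints package; AwsPartition is its accessor for the standard AWS partition:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/endpoints"
    )

    func main() {
        for id, r := range endpoints.AwsPartition().Regions() {
            // Description is free text, may be empty, and may change
            // between SDK releases, so treat it as display-only.
            fmt.Printf("%-15s %s\n", id, r.Description())
        }
    }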
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go index 802ac88a..605a72d3 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go @@ -14,6 +14,7 @@ type Handlers struct { Send HandlerList ValidateResponse HandlerList Unmarshal HandlerList + UnmarshalStream HandlerList UnmarshalMeta HandlerList UnmarshalError HandlerList Retry HandlerList @@ -30,6 +31,7 @@ func (h *Handlers) Copy() Handlers { Send: h.Send.copy(), ValidateResponse: h.ValidateResponse.copy(), Unmarshal: h.Unmarshal.copy(), + UnmarshalStream: h.UnmarshalStream.copy(), UnmarshalError: h.UnmarshalError.copy(), UnmarshalMeta: h.UnmarshalMeta.copy(), Retry: h.Retry.copy(), @@ -45,6 +47,7 @@ func (h *Handlers) Clear() { h.Send.Clear() h.Sign.Clear() h.Unmarshal.Clear() + h.UnmarshalStream.Clear() h.UnmarshalMeta.Clear() h.UnmarshalError.Clear() h.ValidateResponse.Clear() @@ -172,6 +175,21 @@ func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) { return swapped } +// Swap will swap out all handlers matching the name passed in. The matched +// handlers will be swapped in. True is returned if the handlers were swapped. +func (l *HandlerList) Swap(name string, replace NamedHandler) bool { + var swapped bool + + for i := 0; i < len(l.list); i++ { + if l.list[i].Name == name { + l.list[i] = replace + swapped = true + } + } + + return swapped +} + // SetBackNamed will replace the named handler if it exists in the handler list. // If the handler does not exist the handler will be added to the end of the list. func (l *HandlerList) SetBackNamed(n NamedHandler) {
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go index 02f07f4a..b0c2ef4f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go @@ -3,6 +3,8 @@ package request import ( "io" "sync" + + "github.com/aws/aws-sdk-go/internal/sdkio" ) // offsetReader is a thread-safe io.ReadCloser to prevent racing @@ -15,7 +17,7 @@ type offsetReader struct { func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader { reader := &offsetReader{} - buf.Seek(offset, 0) + buf.Seek(offset, sdkio.SeekStart) reader.buf = buf return reader
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go index 5c7db498..75f0fe07 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -14,6 +14,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/internal/sdkio" ) const ( @@ -45,6 +46,7 @@ type Request struct { Handlers Handlers Retryer + AttemptTime time.Time Time time.Time Operation *Operation HTTPRequest *http.Request @@ -120,6 +122,7 @@ func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, Handlers: handlers.Copy(), Retryer: retryer, + AttemptTime: time.Now(), Time: time.Now(), ExpireTime: 0, Operation: operation, @@ -224,6 +227,9 @@ func (r *Request) SetContext(ctx aws.Context) { // WillRetry returns if the request can be retried.
func (r *Request) WillRetry() bool { + if !aws.IsReaderSeekable(r.Body) && r.HTTPRequest.Body != NoBody { + return false + } return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries() } @@ -255,6 +261,7 @@ func (r *Request) SetStringBody(s string) { // SetReaderBody will set the request's body reader. func (r *Request) SetReaderBody(reader io.ReadSeeker) { r.Body = reader + r.BodyStart, _ = reader.Seek(0, sdkio.SeekCurrent) // Get the Body's current offset. r.ResetBody() } @@ -292,6 +299,11 @@ func (r *Request) PresignRequest(expire time.Duration) (string, http.Header, err return getPresignedURL(r, expire) } +// IsPresigned returns true if the request represents a presigned API url. +func (r *Request) IsPresigned() bool { + return r.ExpireTime != 0 +} + func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, error) { if expire <= 0 { return "", nil, awserr.New( @@ -332,7 +344,7 @@ func debugLogReqError(r *Request, stage string, retrying bool, err error) { // Build will build the request's object so it can be signed and sent // to the service. Build will also validate all the request's parameters. -// Anny additional build Handlers set on this request will be run +// Any additional build Handlers set on this request will be run // in the order they were set. // // The request will only be built once. Multiple calls to build will have @@ -358,9 +370,9 @@ func (r *Request) Build() error { return r.Error } -// Sign will sign the request returning error if errors are encountered. +// Sign will sign the request, returning error if errors are encountered. // -// Send will build the request prior to signing. All Sign Handlers will +// Sign will build the request prior to signing. All Sign Handlers will // be executed in the order they were set. func (r *Request) Sign() error { r.Build() @@ -393,7 +405,7 @@ func (r *Request) getNextRequestBody() (io.ReadCloser, error) { // of the SDK if they used that field. // // Related golang/go#18257 - l, err := computeBodyLength(r.Body) + l, err := aws.SeekerLen(r.Body) if err != nil { return nil, awserr.New(ErrCodeSerialization, "failed to compute request body size", err) } @@ -411,7 +423,8 @@ func (r *Request) getNextRequestBody() (io.ReadCloser, error) { // Transfer-Encoding: chunked bodies for these methods. // // This would only happen if a aws.ReaderSeekerCloser was used with - // a io.Reader that was not also an io.Seeker. + // an io.Reader that was not also an io.Seeker, or did not implement + // Len() method. switch r.Operation.HTTPMethod { case "GET", "HEAD", "DELETE": body = NoBody @@ -423,49 +436,13 @@ func (r *Request) getNextRequestBody() (io.ReadCloser, error) { return body, nil } -// Attempts to compute the length of the body of the reader using the -// io.Seeker interface. If the value is not seekable because of being -// a ReaderSeekerCloser without an unerlying Seeker -1 will be returned. -// If no error occurs the length of the body will be returned. -func computeBodyLength(r io.ReadSeeker) (int64, error) { - seekable := true - // Determine if the seeker is actually seekable. ReaderSeekerCloser - // hides the fact that a io.Readers might not actually be seekable.
- switch v := r.(type) { - case aws.ReaderSeekerCloser: - seekable = v.IsSeeker() - case *aws.ReaderSeekerCloser: - seekable = v.IsSeeker() - } - if !seekable { - return -1, nil - } - - curOffset, err := r.Seek(0, 1) - if err != nil { - return 0, err - } - - endOffset, err := r.Seek(0, 2) - if err != nil { - return 0, err - } - - _, err = r.Seek(curOffset, 0) - if err != nil { - return 0, err - } - - return endOffset - curOffset, nil -} - // GetBody will return an io.ReadSeeker of the Request's underlying // input body with a concurrency safe wrapper. func (r *Request) GetBody() io.ReadSeeker { return r.safeBody } -// Send will send the request returning error if errors are encountered. +// Send will send the request, returning error if errors are encountered. // // Send will sign the request prior to sending. All Send Handlers will // be executed in the order they were set. @@ -486,6 +463,7 @@ func (r *Request) Send() error { }() for { + r.AttemptTime = time.Now() if aws.BoolValue(r.Retryable) { if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go index 869b97a1..e36e468b 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go @@ -21,7 +21,7 @@ func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil } var NoBody = noBody{} // ResetBody rewinds the request body back to its starting position, and -// set's the HTTP Request body reference. When the body is read prior +// sets the HTTP Request body reference. When the body is read prior // to being sent in the HTTP request it will need to be rewound. // // ResetBody will automatically be called by the SDK's build handler, but if
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go index c32fc69b..7c6a8000 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go @@ -11,7 +11,7 @@ import ( var NoBody = http.NoBody // ResetBody rewinds the request body back to its starting position, and -// set's the HTTP Request body reference. When the body is read prior +// sets the HTTP Request body reference. When the body is read prior // to being sent in the HTTP request it will need to be rewound. // // ResetBody will automatically be called by the SDK's build handler, but if
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go index 59de6736..a633ed5a 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go @@ -35,8 +35,12 @@ type Pagination struct { // NewRequest should always be built from the same API operations. It is // undefined if different API operations are returned on subsequent calls. NewRequest func() (*Request, error) + // EndPageOnSameToken, when enabled, will allow the paginator to stop on + // tokens that are the same as its previous tokens. + EndPageOnSameToken bool started bool + prevTokens []interface{} nextTokens []interface{} err error @@ -49,7 +53,15 @@ type Pagination struct { // // Will always return true if Next has not been called yet.
func (p *Pagination) HasNextPage() bool { - return !(p.started && len(p.nextTokens) == 0) + if !p.started { + return true + } + + hasNextPage := len(p.nextTokens) != 0 + if p.EndPageOnSameToken { + return hasNextPage && !awsutil.DeepEqual(p.nextTokens, p.prevTokens) + } + return hasNextPage } // Err returns the error Pagination encountered when retrieving the next page. @@ -96,6 +108,7 @@ func (p *Pagination) Next() bool { return false } + p.prevTokens = p.nextTokens p.nextTokens = req.nextPageTokens() p.curPage = req.Data @@ -142,13 +155,28 @@ func (r *Request) nextPageTokens() []interface{} { tokens := []interface{}{} tokenAdded := false for _, outToken := range r.Operation.OutputTokens { - v, _ := awsutil.ValuesAtPath(r.Data, outToken) - if len(v) > 0 { - tokens = append(tokens, v[0]) - tokenAdded = true - } else { + vs, _ := awsutil.ValuesAtPath(r.Data, outToken) + if len(vs) == 0 { tokens = append(tokens, nil) + continue } + v := vs[0] + + switch tv := v.(type) { + case *string: + if len(aws.StringValue(tv)) == 0 { + tokens = append(tokens, nil) + continue + } + case string: + if len(tv) == 0 { + tokens = append(tokens, nil) + continue + } + } + + tokenAdded = true + tokens = append(tokens, v) } if !tokenAdded { return nil
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go index f35fef21..7d527029 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -97,7 +97,7 @@ func isNestedErrorRetryable(parentErr awserr.Error) bool { } if t, ok := err.(temporaryError); ok { - return t.Temporary() + return t.Temporary() || isErrConnectionReset(err) } return isErrConnectionReset(err)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go index ea7b886f..98d420fd 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go @@ -128,7 +128,7 @@ read. The Session will be created from configuration values from the shared credentials file (~/.aws/credentials) over those in the shared config file (~/.aws/config). Credentials are the values the SDK should use for authenticating requests with -AWS Services. They arfrom a configuration file will need to include both +AWS Services. When sourced from a configuration file, both aws_access_key_id and aws_secret_access_key must be provided together in the same file to be considered valid. The values will be ignored if not a complete group. aws_session_token is an optional field that can be provided if both of
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go index f1adcf48..82e04d76 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go @@ -5,6 +5,7 @@ import ( "strconv" "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/defaults" ) // EnvProviderName provides a name of the provider when config is loaded from environment.
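The EndPageOnSameToken option added to request.Pagination above guards against services that keep returning an identical page token, which previously paginated forever. A hedged sketch of how it might be wired up; DynamoDB ListTables is used purely as an illustration of a token-paginated operation, not something this patch touches:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/request"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/dynamodb"
    )

    func main() {
        svc := dynamodb.New(session.Must(session.NewSession()))

        p := request.Pagination{
            // Stop when the next token equals the previous one instead
            // of looping forever.
            EndPageOnSameToken: true,
            NewRequest: func() (*request.Request, error) {
                req, _ := svc.ListTablesRequest(&dynamodb.ListTablesInput{})
                return req, nil
            },
        }
        for p.Next() {
            page := p.Page().(*dynamodb.ListTablesOutput)
            fmt.Println(page.TableNames)
        }
        if err := p.Err(); err != nil {
            fmt.Println("pagination failed:", err)
        }
    }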
@@ -95,9 +96,23 @@ type envConfig struct { // // AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle CustomCABundle string + + csmEnabled string + CSMEnabled bool + CSMPort string + CSMClientID string } var ( + csmEnabledEnvKey = []string{ + "AWS_CSM_ENABLED", + } + csmPortEnvKey = []string{ + "AWS_CSM_PORT", + } + csmClientIDEnvKey = []string{ + "AWS_CSM_CLIENT_ID", + } credAccessEnvKey = []string{ "AWS_ACCESS_KEY_ID", "AWS_ACCESS_KEY", @@ -156,6 +171,12 @@ func envConfigLoad(enableSharedConfig bool) envConfig { setFromEnvVal(&cfg.Creds.SecretAccessKey, credSecretEnvKey) setFromEnvVal(&cfg.Creds.SessionToken, credSessionEnvKey) + // CSM environment variables + setFromEnvVal(&cfg.csmEnabled, csmEnabledEnvKey) + setFromEnvVal(&cfg.CSMPort, csmPortEnvKey) + setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey) + cfg.CSMEnabled = len(cfg.csmEnabled) > 0 + // Require logical grouping of credentials if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 { cfg.Creds = credentials.Value{} @@ -176,6 +197,13 @@ func envConfigLoad(enableSharedConfig bool) envConfig { setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey) setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey) + if len(cfg.SharedCredentialsFile) == 0 { + cfg.SharedCredentialsFile = defaults.SharedCredentialsFilename() + } + if len(cfg.SharedConfigFile) == 0 { + cfg.SharedConfigFile = defaults.SharedConfigFilename() + } + cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE") return cfg diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go index 9f75d5ac..51f30556 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -15,6 +15,7 @@ import ( "github.com/aws/aws-sdk-go/aws/corehandlers" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/credentials/stscreds" + "github.com/aws/aws-sdk-go/aws/csm" "github.com/aws/aws-sdk-go/aws/defaults" "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/aws/request" @@ -26,7 +27,7 @@ import ( // Sessions are safe to create service clients concurrently, but it is not safe // to mutate the Session concurrently. // -// The Session satisfies the service client's client.ClientConfigProvider. +// The Session satisfies the service client's client.ConfigProvider. type Session struct { Config *aws.Config Handlers request.Handlers @@ -58,7 +59,12 @@ func New(cfgs ...*aws.Config) *Session { envCfg := loadEnvConfig() if envCfg.EnableSharedConfig { - s, err := newSession(Options{}, envCfg, cfgs...) + var cfg aws.Config + cfg.MergeIn(cfgs...) + s, err := NewSessionWithOptions(Options{ + Config: cfg, + SharedConfigState: SharedConfigEnable, + }) if err != nil { // Old session.New expected all errors to be discovered when // a request is made, and would report the errors then. This @@ -76,10 +82,16 @@ func New(cfgs ...*aws.Config) *Session { r.Error = err }) } + return s } - return deprecatedNewSession(cfgs...) + s := deprecatedNewSession(cfgs...) 
+ if envCfg.CSMEnabled { + enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger) + } + + return s } // NewSession returns a new Session created from SDK defaults, config files, @@ -243,13 +255,6 @@ func NewSessionWithOptions(opts Options) (*Session, error) { envCfg.EnableSharedConfig = true } - if len(envCfg.SharedCredentialsFile) == 0 { - envCfg.SharedCredentialsFile = defaults.SharedCredentialsFilename() - } - if len(envCfg.SharedConfigFile) == 0 { - envCfg.SharedConfigFile = defaults.SharedConfigFilename() - } - // Only use AWS_CA_BUNDLE if session option is not provided. if len(envCfg.CustomCABundle) != 0 && opts.CustomCABundle == nil { f, err := os.Open(envCfg.CustomCABundle) @@ -302,10 +307,22 @@ func deprecatedNewSession(cfgs ...*aws.Config) *Session { } initHandlers(s) - return s } +func enableCSM(handlers *request.Handlers, clientID string, port string, logger aws.Logger) { + logger.Log("Enabling CSM") + if len(port) == 0 { + port = csm.DefaultPort + } + + r, err := csm.Start(clientID, "127.0.0.1:"+port) + if err != nil { + return + } + r.InjectHandlers(handlers) +} + func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { cfg := defaults.Config() handlers := defaults.Handlers() @@ -345,6 +362,9 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, } initHandlers(s) + if envCfg.CSMEnabled { + enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger) + } // Setup HTTP client with custom cert bundle if enabled if opts.CustomCABundle != nil { @@ -573,11 +593,12 @@ func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) ( } return client.Config{ - Config: s.Config, - Handlers: s.Handlers, - Endpoint: resolved.URL, - SigningRegion: resolved.SigningRegion, - SigningName: resolved.SigningName, + Config: s.Config, + Handlers: s.Handlers, + Endpoint: resolved.URL, + SigningRegion: resolved.SigningRegion, + SigningNameDerived: resolved.SigningNameDerived, + SigningName: resolved.SigningName, }, err } @@ -597,10 +618,11 @@ func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Conf } return client.Config{ - Config: s.Config, - Handlers: s.Handlers, - Endpoint: resolved.URL, - SigningRegion: resolved.SigningRegion, - SigningName: resolved.SigningName, + Config: s.Config, + Handlers: s.Handlers, + Endpoint: resolved.URL, + SigningRegion: resolved.SigningRegion, + SigningNameDerived: resolved.SigningNameDerived, + SigningName: resolved.SigningName, } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go index ccc88b4a..8aa0681d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -71,6 +71,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkio" "github.com/aws/aws-sdk-go/private/protocol/rest" ) @@ -97,25 +98,25 @@ var ignoredHeaders = rules{ var requiredSignedHeaders = rules{ whitelist{ mapRule{ - "Cache-Control": struct{}{}, - "Content-Disposition": struct{}{}, - "Content-Encoding": struct{}{}, - "Content-Language": struct{}{}, - "Content-Md5": struct{}{}, - "Content-Type": struct{}{}, - "Expires": struct{}{}, - "If-Match": struct{}{}, - "If-Modified-Since": struct{}{}, - "If-None-Match": struct{}{}, - "If-Unmodified-Since": struct{}{}, - "Range": struct{}{}, - "X-Amz-Acl": struct{}{}, - 
"X-Amz-Copy-Source": struct{}{}, - "X-Amz-Copy-Source-If-Match": struct{}{}, - "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, - "X-Amz-Copy-Source-If-None-Match": struct{}{}, - "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, - "X-Amz-Copy-Source-Range": struct{}{}, + "Cache-Control": struct{}{}, + "Content-Disposition": struct{}{}, + "Content-Encoding": struct{}{}, + "Content-Language": struct{}{}, + "Content-Md5": struct{}{}, + "Content-Type": struct{}{}, + "Expires": struct{}{}, + "If-Match": struct{}{}, + "If-Modified-Since": struct{}{}, + "If-None-Match": struct{}{}, + "If-Unmodified-Since": struct{}{}, + "Range": struct{}{}, + "X-Amz-Acl": struct{}{}, + "X-Amz-Copy-Source": struct{}{}, + "X-Amz-Copy-Source-If-Match": struct{}{}, + "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, + "X-Amz-Copy-Source-If-None-Match": struct{}{}, + "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, + "X-Amz-Copy-Source-Range": struct{}{}, "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, @@ -134,6 +135,7 @@ var requiredSignedHeaders = rules{ "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, "X-Amz-Storage-Class": struct{}{}, "X-Amz-Website-Redirect-Location": struct{}{}, + "X-Amz-Content-Sha256": struct{}{}, }, }, patterns{"X-Amz-Meta-"}, @@ -341,7 +343,9 @@ func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, regi ctx.sanitizeHostForHeader() ctx.assignAmzQueryValues() - ctx.build(v4.DisableHeaderHoisting) + if err := ctx.build(v4.DisableHeaderHoisting); err != nil { + return nil, err + } // If the request is not presigned the body should be attached to it. 
This // prevents the confusion of wanting to send a signed request without @@ -503,11 +507,13 @@ func (v4 *Signer) logSigningInfo(ctx *signingCtx) { v4.Logger.Log(msg) } -func (ctx *signingCtx) build(disableHeaderHoisting bool) { +func (ctx *signingCtx) build(disableHeaderHoisting bool) error { ctx.buildTime() // no depends ctx.buildCredentialString() // no depends - ctx.buildBodyDigest() + if err := ctx.buildBodyDigest(); err != nil { + return err + } unsignedHeaders := ctx.Request.Header if ctx.isPresign { @@ -535,6 +541,8 @@ func (ctx *signingCtx) build(disableHeaderHoisting bool) { } ctx.Request.Header.Set("Authorization", strings.Join(parts, ", ")) } + + return nil } func (ctx *signingCtx) buildTime() { @@ -661,21 +669,34 @@ func (ctx *signingCtx) buildSignature() { ctx.signature = hex.EncodeToString(signature) } -func (ctx *signingCtx) buildBodyDigest() { +func (ctx *signingCtx) buildBodyDigest() error { hash := ctx.Request.Header.Get("X-Amz-Content-Sha256") if hash == "" { - if ctx.unsignedPayload || (ctx.isPresign && ctx.ServiceName == "s3") { + includeSHA256Header := ctx.unsignedPayload || + ctx.ServiceName == "s3" || + ctx.ServiceName == "glacier" + + s3Presign := ctx.isPresign && ctx.ServiceName == "s3" + + if ctx.unsignedPayload || s3Presign { hash = "UNSIGNED-PAYLOAD" + includeSHA256Header = !s3Presign } else if ctx.Body == nil { hash = emptyStringSHA256 } else { + if !aws.IsReaderSeekable(ctx.Body) { + return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body) + } hash = hex.EncodeToString(makeSha256Reader(ctx.Body)) } + + if includeSHA256Header { ctx.Request.Header.Set("X-Amz-Content-Sha256", hash) } } ctx.bodyDigest = hash + + return nil } // isRequestSigned returns if the request is currently signed or presigned @@ -715,8 +736,8 @@ func makeSha256(data []byte) []byte { func makeSha256Reader(reader io.ReadSeeker) []byte { hash := sha256.New() - start, _ := reader.Seek(0, 1) - defer reader.Seek(start, 0) + start, _ := reader.Seek(0, sdkio.SeekCurrent) + defer reader.Seek(start, sdkio.SeekStart) io.Copy(hash, reader) return hash.Sum(nil)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go index 0e2d864e..8b6f2342 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/types.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go @@ -3,6 +3,8 @@ package aws import ( "io" "sync" + + "github.com/aws/aws-sdk-go/internal/sdkio" ) // ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Should @@ -22,6 +24,22 @@ type ReaderSeekerCloser struct { r io.Reader } +// IsReaderSeekable returns if the underlying reader type can be seeked. An +// io.Reader might not actually be seekable if it is the ReaderSeekerCloser +// type. +func IsReaderSeekable(r io.Reader) bool { + switch v := r.(type) { + case ReaderSeekerCloser: + return v.IsSeeker() + case *ReaderSeekerCloser: + return v.IsSeeker() + case io.ReadSeeker: + return true + default: + return false + } +} + // Read reads from the reader up to size of p. The number of bytes read, and // error if it occurred will be returned. // @@ -56,6 +74,71 @@ func (r ReaderSeekerCloser) IsSeeker() bool { return ok } +// HasLen returns the length of the underlying reader if the value implements +// the Len() int method.
+func (r ReaderSeekerCloser) HasLen() (int, bool) { + type lenner interface { + Len() int + } + + if lr, ok := r.r.(lenner); ok { + return lr.Len(), true + } + + return 0, false +} + +// GetLen returns the length of the bytes remaining in the underlying reader. +// Checks first for Len(), then io.Seeker to determine the size of the +// underlying reader. +// +// Will return -1 if the length cannot be determined. +func (r ReaderSeekerCloser) GetLen() (int64, error) { + if l, ok := r.HasLen(); ok { + return int64(l), nil + } + + if s, ok := r.r.(io.Seeker); ok { + return seekerLen(s) + } + + return -1, nil +} + +// SeekerLen attempts to get the number of bytes remaining at the seeker's +// current position. Returns the number of bytes remaining or error. +func SeekerLen(s io.Seeker) (int64, error) { + // Determine if the seeker is actually seekable. ReaderSeekerCloser + // hides the fact that an io.Reader might not actually be seekable. + switch v := s.(type) { + case ReaderSeekerCloser: + return v.GetLen() + case *ReaderSeekerCloser: + return v.GetLen() + } + + return seekerLen(s) +} + +func seekerLen(s io.Seeker) (int64, error) { + curOffset, err := s.Seek(0, sdkio.SeekCurrent) + if err != nil { + return 0, err + } + + endOffset, err := s.Seek(0, sdkio.SeekEnd) + if err != nil { + return 0, err + } + + _, err = s.Seek(curOffset, sdkio.SeekStart) + if err != nil { + return 0, err + } + + return endOffset - curOffset, nil +} + // Close closes the ReaderSeekerCloser. // // If the ReaderSeekerCloser is not an io.Closer nothing will be done.
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 5c265d57..668171d3 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.12.36" +const SDKVersion = "1.15.11"
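aws.SeekerLen replaces the computeBodyLength helper deleted from request.go above: it reports the bytes remaining from the seeker's current offset and restores the offset before returning. A small sketch, assuming nothing beyond the standard library and this vendored aws package:

    package main

    import (
        "bytes"
        "fmt"
        "io"

        "github.com/aws/aws-sdk-go/aws"
    )

    func main() {
        r := bytes.NewReader([]byte("hello world"))
        r.Seek(6, io.SeekStart) // leave "world" unread

        n, err := aws.SeekerLen(r)
        fmt.Println(n, err) // 5 <nil>; the offset stays at 6
    }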
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go new file mode 100644 index 00000000..5aa9137e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go @@ -0,0 +1,10 @@ +// +build !go1.7 + +package sdkio + +// Copy of Go 1.7 io package's Seeker constants. +const ( + SeekStart = 0 // seek relative to the origin of the file + SeekCurrent = 1 // seek relative to the current offset + SeekEnd = 2 // seek relative to the end +)
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go new file mode 100644 index 00000000..e5f00561 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go @@ -0,0 +1,12 @@ +// +build go1.7 + +package sdkio + +import "io" + +// Alias for Go 1.7 io package Seeker constants +const ( + SeekStart = io.SeekStart // seek relative to the origin of the file + SeekCurrent = io.SeekCurrent // seek relative to the current offset + SeekEnd = io.SeekEnd // seek relative to the end +)
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go new file mode 100644 index 00000000..0c9802d8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go @@ -0,0 +1,29 @@ +package sdkrand + +import ( + "math/rand" + "sync" + "time" +) + +// lockedSource is a thread-safe implementation of rand.Source +type lockedSource struct { + lk sync.Mutex + src rand.Source +} + +func (r *lockedSource) Int63() (n int64) { + r.lk.Lock() + n = r.src.Int63() + r.lk.Unlock() + return +} + +func (r *lockedSource) Seed(seed int64) { + r.lk.Lock() + r.src.Seed(seed) + r.lk.Unlock() +} + +// SeededRand is a new RNG using a thread safe implementation of rand.Source +var SeededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go new file mode 100644 index 00000000..38ea61af --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go @@ -0,0 +1,23 @@ +package sdkuri + +import ( + "path" + "strings" +) + +// PathJoin will join the elements of the path delimited by the "/" +// character. Similar to path.Join with the exception that the trailing "/" +// character is preserved if present. +func PathJoin(elems ...string) string { + if len(elems) == 0 { + return "" + } + + hasTrailing := strings.HasSuffix(elems[len(elems)-1], "/") + str := path.Join(elems...)
+ if hasTrailing && str != "/" { + str += "/" + } + + return str +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go new file mode 100644 index 00000000..ecc7bf82 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go @@ -0,0 +1,144 @@ +package eventstream + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "strconv" +) + +type decodedMessage struct { + rawMessage + Headers decodedHeaders `json:"headers"` +} +type jsonMessage struct { + Length json.Number `json:"total_length"` + HeadersLen json.Number `json:"headers_length"` + PreludeCRC json.Number `json:"prelude_crc"` + Headers decodedHeaders `json:"headers"` + Payload []byte `json:"payload"` + CRC json.Number `json:"message_crc"` +} + +func (d *decodedMessage) UnmarshalJSON(b []byte) (err error) { + var jsonMsg jsonMessage + if err = json.Unmarshal(b, &jsonMsg); err != nil { + return err + } + + d.Length, err = numAsUint32(jsonMsg.Length) + if err != nil { + return err + } + d.HeadersLen, err = numAsUint32(jsonMsg.HeadersLen) + if err != nil { + return err + } + d.PreludeCRC, err = numAsUint32(jsonMsg.PreludeCRC) + if err != nil { + return err + } + d.Headers = jsonMsg.Headers + d.Payload = jsonMsg.Payload + d.CRC, err = numAsUint32(jsonMsg.CRC) + if err != nil { + return err + } + + return nil +} + +func (d *decodedMessage) MarshalJSON() ([]byte, error) { + jsonMsg := jsonMessage{ + Length: json.Number(strconv.Itoa(int(d.Length))), + HeadersLen: json.Number(strconv.Itoa(int(d.HeadersLen))), + PreludeCRC: json.Number(strconv.Itoa(int(d.PreludeCRC))), + Headers: d.Headers, + Payload: d.Payload, + CRC: json.Number(strconv.Itoa(int(d.CRC))), + } + + return json.Marshal(jsonMsg) +} + +func numAsUint32(n json.Number) (uint32, error) { + v, err := n.Int64() + if err != nil { + return 0, fmt.Errorf("failed to get int64 json number, %v", err) + } + + return uint32(v), nil +} + +func (d decodedMessage) Message() Message { + return Message{ + Headers: Headers(d.Headers), + Payload: d.Payload, + } +} + +type decodedHeaders Headers + +func (hs *decodedHeaders) UnmarshalJSON(b []byte) error { + var jsonHeaders []struct { + Name string `json:"name"` + Type valueType `json:"type"` + Value interface{} `json:"value"` + } + + decoder := json.NewDecoder(bytes.NewReader(b)) + decoder.UseNumber() + if err := decoder.Decode(&jsonHeaders); err != nil { + return err + } + + var headers Headers + for _, h := range jsonHeaders { + value, err := valueFromType(h.Type, h.Value) + if err != nil { + return err + } + headers.Set(h.Name, value) + } + (*hs) = decodedHeaders(headers) + + return nil +} + +func valueFromType(typ valueType, val interface{}) (Value, error) { + switch typ { + case trueValueType: + return BoolValue(true), nil + case falseValueType: + return BoolValue(false), nil + case int8ValueType: + v, err := val.(json.Number).Int64() + return Int8Value(int8(v)), err + case int16ValueType: + v, err := val.(json.Number).Int64() + return Int16Value(int16(v)), err + case int32ValueType: + v, err := val.(json.Number).Int64() + return Int32Value(int32(v)), err + case int64ValueType: + v, err := val.(json.Number).Int64() + return Int64Value(v), err + case bytesValueType: + v, err := base64.StdEncoding.DecodeString(val.(string)) + return BytesValue(v), err + case stringValueType: + v, err := base64.StdEncoding.DecodeString(val.(string)) + return StringValue(string(v)), err + case timestampValueType: + v, 
err := val.(json.Number).Int64() + return TimestampValue(timeFromEpochMilli(v)), err + case uuidValueType: + v, err := base64.StdEncoding.DecodeString(val.(string)) + var tv UUIDValue + copy(tv[:], v) + return tv, err + default: + panic(fmt.Sprintf("unknown type, %s, %T", typ.String(), val)) + } +}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go new file mode 100644 index 00000000..4b972b2d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go @@ -0,0 +1,199 @@ +package eventstream + +import ( + "bytes" + "encoding/binary" + "encoding/hex" + "encoding/json" + "fmt" + "hash" + "hash/crc32" + "io" + + "github.com/aws/aws-sdk-go/aws" +) + +// Decoder provides decoding of Event Stream messages. +type Decoder struct { + r io.Reader + logger aws.Logger +} + +// NewDecoder initializes and returns a Decoder for decoding event +// stream messages from the reader provided. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{ + r: r, + } +} + +// Decode attempts to decode a single message from the event stream reader. +// Will return the event stream message, or error if Decode fails to read +// the message from the stream. +func (d *Decoder) Decode(payloadBuf []byte) (m Message, err error) { + reader := d.r + if d.logger != nil { + debugMsgBuf := bytes.NewBuffer(nil) + reader = io.TeeReader(reader, debugMsgBuf) + defer func() { + logMessageDecode(d.logger, debugMsgBuf, m, err) + }() + } + + crc := crc32.New(crc32IEEETable) + hashReader := io.TeeReader(reader, crc) + + prelude, err := decodePrelude(hashReader, crc) + if err != nil { + return Message{}, err + } + + if prelude.HeadersLen > 0 { + lr := io.LimitReader(hashReader, int64(prelude.HeadersLen)) + m.Headers, err = decodeHeaders(lr) + if err != nil { + return Message{}, err + } + } + + if payloadLen := prelude.PayloadLen(); payloadLen > 0 { + buf, err := decodePayload(payloadBuf, io.LimitReader(hashReader, int64(payloadLen))) + if err != nil { + return Message{}, err + } + m.Payload = buf + } + + msgCRC := crc.Sum32() + if err := validateCRC(reader, msgCRC); err != nil { + return Message{}, err + } + + return m, nil +} + +// UseLogger specifies the Logger that the decoder should use to log the +// message decode to.
+func (d *Decoder) UseLogger(logger aws.Logger) { + d.logger = logger +} + +func logMessageDecode(logger aws.Logger, msgBuf *bytes.Buffer, msg Message, decodeErr error) { + w := bytes.NewBuffer(nil) + defer func() { logger.Log(w.String()) }() + + fmt.Fprintf(w, "Raw message:\n%s\n", + hex.Dump(msgBuf.Bytes())) + + if decodeErr != nil { + fmt.Fprintf(w, "Decode error: %v\n", decodeErr) + return + } + + rawMsg, err := msg.rawMessage() + if err != nil { + fmt.Fprintf(w, "failed to create raw message, %v\n", err) + return + } + + decodedMsg := decodedMessage{ + rawMessage: rawMsg, + Headers: decodedHeaders(msg.Headers), + } + + fmt.Fprintf(w, "Decoded message:\n") + encoder := json.NewEncoder(w) + if err := encoder.Encode(decodedMsg); err != nil { + fmt.Fprintf(w, "failed to generate decoded message, %v\n", err) + } +} + +func decodePrelude(r io.Reader, crc hash.Hash32) (messagePrelude, error) { + var p messagePrelude + + var err error + p.Length, err = decodeUint32(r) + if err != nil { + return messagePrelude{}, err + } + + p.HeadersLen, err = decodeUint32(r) + if err != nil { + return messagePrelude{}, err + } + + if err := p.ValidateLens(); err != nil { + return messagePrelude{}, err + } + + preludeCRC := crc.Sum32() + if err := validateCRC(r, preludeCRC); err != nil { + return messagePrelude{}, err + } + + p.PreludeCRC = preludeCRC + + return p, nil +} + +func decodePayload(buf []byte, r io.Reader) ([]byte, error) { + w := bytes.NewBuffer(buf[0:0]) + + _, err := io.Copy(w, r) + return w.Bytes(), err +} + +func decodeUint8(r io.Reader) (uint8, error) { + type byteReader interface { + ReadByte() (byte, error) + } + + if br, ok := r.(byteReader); ok { + v, err := br.ReadByte() + return uint8(v), err + } + + var b [1]byte + _, err := io.ReadFull(r, b[:]) + return uint8(b[0]), err +} +func decodeUint16(r io.Reader) (uint16, error) { + var b [2]byte + bs := b[:] + _, err := io.ReadFull(r, bs) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint16(bs), nil +} +func decodeUint32(r io.Reader) (uint32, error) { + var b [4]byte + bs := b[:] + _, err := io.ReadFull(r, bs) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint32(bs), nil +} +func decodeUint64(r io.Reader) (uint64, error) { + var b [8]byte + bs := b[:] + _, err := io.ReadFull(r, bs) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint64(bs), nil +} + +func validateCRC(r io.Reader, expect uint32) error { + msgCRC, err := decodeUint32(r) + if err != nil { + return err + } + + if msgCRC != expect { + return ChecksumError{} + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go new file mode 100644 index 00000000..150a6098 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go @@ -0,0 +1,114 @@ +package eventstream + +import ( + "bytes" + "encoding/binary" + "hash" + "hash/crc32" + "io" +) + +// Encoder provides EventStream message encoding. +type Encoder struct { + w io.Writer + + headersBuf *bytes.Buffer +} + +// NewEncoder initializes and returns an Encoder to encode Event Stream +// messages to an io.Writer. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + w: w, + headersBuf: bytes.NewBuffer(nil), + } +} + +// Encode encodes a single EventStream message to the io.Writer the Encoder +// was created with. An error is returned if writing the message fails. 
+func (e *Encoder) Encode(msg Message) error { + e.headersBuf.Reset() + + err := encodeHeaders(e.headersBuf, msg.Headers) + if err != nil { + return err + } + + crc := crc32.New(crc32IEEETable) + hashWriter := io.MultiWriter(e.w, crc) + + headersLen := uint32(e.headersBuf.Len()) + payloadLen := uint32(len(msg.Payload)) + + if err := encodePrelude(hashWriter, crc, headersLen, payloadLen); err != nil { + return err + } + + if headersLen > 0 { + if _, err := io.Copy(hashWriter, e.headersBuf); err != nil { + return err + } + } + + if payloadLen > 0 { + if _, err := hashWriter.Write(msg.Payload); err != nil { + return err + } + } + + msgCRC := crc.Sum32() + return binary.Write(e.w, binary.BigEndian, msgCRC) +} + +func encodePrelude(w io.Writer, crc hash.Hash32, headersLen, payloadLen uint32) error { + p := messagePrelude{ + Length: minMsgLen + headersLen + payloadLen, + HeadersLen: headersLen, + } + if err := p.ValidateLens(); err != nil { + return err + } + + err := binaryWriteFields(w, binary.BigEndian, + p.Length, + p.HeadersLen, + ) + if err != nil { + return err + } + + p.PreludeCRC = crc.Sum32() + err = binary.Write(w, binary.BigEndian, p.PreludeCRC) + if err != nil { + return err + } + + return nil +} + +func encodeHeaders(w io.Writer, headers Headers) error { + for _, h := range headers { + hn := headerName{ + Len: uint8(len(h.Name)), + } + copy(hn.Name[:hn.Len], h.Name) + if err := hn.encode(w); err != nil { + return err + } + + if err := h.Value.encode(w); err != nil { + return err + } + } + + return nil +} + +func binaryWriteFields(w io.Writer, order binary.ByteOrder, vs ...interface{}) error { + for _, v := range vs { + if err := binary.Write(w, order, v); err != nil { + return err + } + } + return nil +}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go new file mode 100644 index 00000000..5481ef30 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go @@ -0,0 +1,23 @@ +package eventstream + +import "fmt" + +// LengthError provides the error for items being larger than a maximum length. +type LengthError struct { + Part string + Want int + Have int + Value interface{} +} + +func (e LengthError) Error() string { + return fmt.Sprintf("%s length invalid, %d/%d, %v", + e.Part, e.Want, e.Have, e.Value) +} + +// ChecksumError provides the error for message checksum invalidation errors. +type ChecksumError struct{} + +func (e ChecksumError) Error() string { + return "message checksum mismatch" +}
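Before the eventstreamapi layer below builds on them, the Encoder and Decoder can be exercised on their own. A hedged round-trip sketch; eventstream is an SDK-private package, so this only compiles from within the SDK's own tree, and the header name and payload here are arbitrary:

    package main

    import (
        "bytes"
        "fmt"

        "github.com/aws/aws-sdk-go/private/protocol/eventstream"
    )

    func main() {
        var buf bytes.Buffer

        msg := eventstream.Message{
            Headers: eventstream.Headers{
                {Name: ":message-type", Value: eventstream.StringValue("event")},
            },
            Payload: []byte(`{"hello":"world"}`),
        }
        if err := eventstream.NewEncoder(&buf).Encode(msg); err != nil {
            panic(err)
        }

        // Decode validates the prelude and message CRCs while reading.
        out, err := eventstream.NewDecoder(&buf).Decode(nil)
        fmt.Println(string(out.Payload), err)
    }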
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go new file mode 100644 index 00000000..97937c8e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go @@ -0,0 +1,196 @@ +package eventstreamapi + +import ( + "fmt" + "io" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/eventstream" +) + +// Unmarshaler provides the interface for unmarshaling an EventStream +// message into an SDK type. +type Unmarshaler interface { + UnmarshalEvent(protocol.PayloadUnmarshaler, eventstream.Message) error +} + +// EventStream headers with specific meaning to async API functionality. +const ( + MessageTypeHeader = `:message-type` // Identifies type of message. + EventMessageType = `event` + ErrorMessageType = `error` + ExceptionMessageType = `exception` + + // Message Events + EventTypeHeader = `:event-type` // Identifies message event type e.g. "Stats". + + // Message Error + ErrorCodeHeader = `:error-code` + ErrorMessageHeader = `:error-message` + + // Message Exception + ExceptionTypeHeader = `:exception-type` +) + +// EventReader provides reading from the EventStream of a reader. +type EventReader struct { + reader io.ReadCloser + decoder *eventstream.Decoder + + unmarshalerForEventType func(string) (Unmarshaler, error) + payloadUnmarshaler protocol.PayloadUnmarshaler + + payloadBuf []byte +} + +// NewEventReader returns an EventReader built from the reader and unmarshaler +// provided. Use ReadStream method to start reading from the EventStream. +func NewEventReader( + reader io.ReadCloser, + payloadUnmarshaler protocol.PayloadUnmarshaler, + unmarshalerForEventType func(string) (Unmarshaler, error), +) *EventReader { + return &EventReader{ + reader: reader, + decoder: eventstream.NewDecoder(reader), + payloadUnmarshaler: payloadUnmarshaler, + unmarshalerForEventType: unmarshalerForEventType, + payloadBuf: make([]byte, 10*1024), + } +} + +// UseLogger instructs the EventReader to use the logger and log level +// specified. +func (r *EventReader) UseLogger(logger aws.Logger, logLevel aws.LogLevelType) { + if logger != nil && logLevel.Matches(aws.LogDebugWithEventStreamBody) { + r.decoder.UseLogger(logger) + } +} + +// ReadEvent attempts to read a message from the EventStream and return the +// unmarshaled event value that the message is for. +// +// For EventStream API errors check if the returned error satisfies the +// awserr.Error interface to get the error's Code and Message components. +// +// EventUnmarshalers called with EventStream messages must take copies of the +// message's Payload. The payload is reused between events read. +func (r *EventReader) ReadEvent() (event interface{}, err error) { + msg, err := r.decoder.Decode(r.payloadBuf) + if err != nil { + return nil, err + } + defer func() { + // Reclaim payload buffer for next message read.
+        r.payloadBuf = msg.Payload[0:0]
+    }()
+
+    typ, err := GetHeaderString(msg, MessageTypeHeader)
+    if err != nil {
+        return nil, err
+    }
+
+    switch typ {
+    case EventMessageType:
+        return r.unmarshalEventMessage(msg)
+    case ExceptionMessageType:
+        err = r.unmarshalEventException(msg)
+        return nil, err
+    case ErrorMessageType:
+        return nil, r.unmarshalErrorMessage(msg)
+    default:
+        return nil, fmt.Errorf("unknown eventstream message type, %v", typ)
+    }
+}
+
+func (r *EventReader) unmarshalEventMessage(
+    msg eventstream.Message,
+) (event interface{}, err error) {
+    eventType, err := GetHeaderString(msg, EventTypeHeader)
+    if err != nil {
+        return nil, err
+    }
+
+    ev, err := r.unmarshalerForEventType(eventType)
+    if err != nil {
+        return nil, err
+    }
+
+    err = ev.UnmarshalEvent(r.payloadUnmarshaler, msg)
+    if err != nil {
+        return nil, err
+    }
+
+    return ev, nil
+}
+
+func (r *EventReader) unmarshalEventException(
+    msg eventstream.Message,
+) (err error) {
+    eventType, err := GetHeaderString(msg, ExceptionTypeHeader)
+    if err != nil {
+        return err
+    }
+
+    ev, err := r.unmarshalerForEventType(eventType)
+    if err != nil {
+        return err
+    }
+
+    err = ev.UnmarshalEvent(r.payloadUnmarshaler, msg)
+    if err != nil {
+        return err
+    }
+
+    var ok bool
+    err, ok = ev.(error)
+    if !ok {
+        err = messageError{
+            code: "SerializationError",
+            msg: fmt.Sprintf(
+                "event stream exception %s mapped to non-error %T, %v",
+                eventType, ev, ev,
+            ),
+        }
+    }
+
+    return err
+}
+
+func (r *EventReader) unmarshalErrorMessage(msg eventstream.Message) (err error) {
+    var msgErr messageError
+
+    msgErr.code, err = GetHeaderString(msg, ErrorCodeHeader)
+    if err != nil {
+        return err
+    }
+
+    msgErr.msg, err = GetHeaderString(msg, ErrorMessageHeader)
+    if err != nil {
+        return err
+    }
+
+    return msgErr
+}
+
+// Close closes the EventReader's EventStream reader.
+func (r *EventReader) Close() error {
+    return r.reader.Close()
+}
+
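The ReadEvent loop above is the piece service clients build on. As a rough sketch of how a caller could wire it up; the StatsEvent type, the discardUnmarshaler, and the "Stats" event name are illustrative stand-ins, not SDK types:

```go
package example

import (
	"fmt"
	"io"
	"io/ioutil"

	"github.com/aws/aws-sdk-go/private/protocol"
	"github.com/aws/aws-sdk-go/private/protocol/eventstream"
	"github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi"
)

// StatsEvent is a hypothetical event shape; real event types are generated
// per service operation.
type StatsEvent struct{ Raw []byte }

// UnmarshalEvent keeps a copy of the payload, since the EventReader reuses
// its payload buffer between events.
func (e *StatsEvent) UnmarshalEvent(u protocol.PayloadUnmarshaler, msg eventstream.Message) error {
	e.Raw = append([]byte(nil), msg.Payload...)
	return nil
}

// discardUnmarshaler is a stand-in PayloadUnmarshaler that drains the reader.
type discardUnmarshaler struct{}

func (discardUnmarshaler) UnmarshalPayload(r io.Reader, v interface{}) error {
	_, err := io.Copy(ioutil.Discard, r)
	return err
}

func consume(stream io.ReadCloser) error {
	r := eventstreamapi.NewEventReader(stream, discardUnmarshaler{},
		func(eventType string) (eventstreamapi.Unmarshaler, error) {
			if eventType == "Stats" {
				return &StatsEvent{}, nil
			}
			return nil, fmt.Errorf("unknown event type %q", eventType)
		})
	defer r.Close()

	for {
		event, err := r.ReadEvent()
		if err == io.EOF {
			return nil // underlying decoder signals end of stream
		}
		if err != nil {
			return err
		}
		fmt.Printf("received %T\n", event)
	}
}
```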
+// GetHeaderString returns the value of the header as a string. If the header
+// is not set, or the value is not a string, an error will be returned.
+func GetHeaderString(msg eventstream.Message, headerName string) (string, error) {
+    headerVal := msg.Headers.Get(headerName)
+    if headerVal == nil {
+        return "", fmt.Errorf("error header %s not present", headerName)
+    }
+
+    v, ok := headerVal.Get().(string)
+    if !ok {
+        return "", fmt.Errorf("error header value is not a string, %T", headerVal)
+    }
+
+    return v, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go
new file mode 100644
index 00000000..5ea5a988
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go
@@ -0,0 +1,24 @@
+package eventstreamapi
+
+import "fmt"
+
+type messageError struct {
+    code string
+    msg  string
+}
+
+func (e messageError) Code() string {
+    return e.code
+}
+
+func (e messageError) Message() string {
+    return e.msg
+}
+
+func (e messageError) Error() string {
+    return fmt.Sprintf("%s: %s", e.code, e.msg)
+}
+
+func (e messageError) OrigErr() error {
+    return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go
new file mode 100644
index 00000000..3b44dde2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go
@@ -0,0 +1,168 @@
+package eventstream
+
+import (
+    "encoding/binary"
+    "fmt"
+    "io"
+)
+
+// Headers are a collection of EventStream header values.
+type Headers []Header
+
+// Header is a single EventStream Key Value header pair.
+type Header struct {
+    Name  string
+    Value Value
+}
+
+// Set associates the name with a value. If the header name already exists in
+// the Headers, the value will be replaced with the new one.
+func (hs *Headers) Set(name string, value Value) {
+    var i int
+    for ; i < len(*hs); i++ {
+        if (*hs)[i].Name == name {
+            (*hs)[i].Value = value
+            return
+        }
+    }
+
+    *hs = append(*hs, Header{
+        Name: name, Value: value,
+    })
+}
+
+// Get returns the Value associated with the header. Nil is returned if the
+// value does not exist.
+func (hs Headers) Get(name string) Value {
+    for i := 0; i < len(hs); i++ {
+        if h := hs[i]; h.Name == name {
+            return h.Value
+        }
+    }
+    return nil
+}
+
+// Del deletes the value in the Headers if it exists.
+func (hs *Headers) Del(name string) {
+    for i := 0; i < len(*hs); i++ {
+        if (*hs)[i].Name == name {
+            copy((*hs)[i:], (*hs)[i+1:])
+            (*hs) = (*hs)[:len(*hs)-1]
+            // Recheck this index, the next element was shifted into it.
+            i--
+        }
+    }
+}
+
+func decodeHeaders(r io.Reader) (Headers, error) {
+    hs := Headers{}
+
+    for {
+        name, err := decodeHeaderName(r)
+        if err != nil {
+            if err == io.EOF {
+                // EOF while getting header name means no more headers
+                break
+            }
+            return nil, err
+        }
+
+        value, err := decodeHeaderValue(r)
+        if err != nil {
+            return nil, err
+        }
+
+        hs.Set(name, value)
+    }
+
+    return hs, nil
+}
+
+func decodeHeaderName(r io.Reader) (string, error) {
+    var n headerName
+
+    var err error
+    n.Len, err = decodeUint8(r)
+    if err != nil {
+        return "", err
+    }
+
+    name := n.Name[:n.Len]
+    if _, err := io.ReadFull(r, name); err != nil {
+        return "", err
+    }
+
+    return string(name), nil
+}
+
+func decodeHeaderValue(r io.Reader) (Value, error) {
+    var raw rawValue
+
+    typ, err := decodeUint8(r)
+    if err != nil {
+        return nil, err
+    }
+    raw.Type = valueType(typ)
+
+    var v Value
+
+    switch raw.Type {
+    case trueValueType:
+        v = BoolValue(true)
+    case falseValueType:
+        v = BoolValue(false)
+    case int8ValueType:
+        var tv Int8Value
+        err = tv.decode(r)
+        v = tv
+    case int16ValueType:
+        var tv Int16Value
+        err = tv.decode(r)
+        v = tv
+    case int32ValueType:
+        var tv Int32Value
+        err = tv.decode(r)
+        v = tv
+    case int64ValueType:
+        var tv Int64Value
+        err = tv.decode(r)
+        v = tv
+    case bytesValueType:
+        var tv BytesValue
+        err = tv.decode(r)
+        v = tv
+    case stringValueType:
+        var tv StringValue
+        err = tv.decode(r)
+        v = tv
+    case timestampValueType:
+        var tv TimestampValue
+        err = tv.decode(r)
+        v = tv
+    case uuidValueType:
+        var tv UUIDValue
+        err = tv.decode(r)
+        v = tv
+    default:
+        panic(fmt.Sprintf("unknown value type %d", raw.Type))
+    }
+
+    // Error could be EOF, let caller deal with it
+    return v, err
+}
+
+const maxHeaderNameLen = 255
+
+type headerName struct {
+    Len  uint8
+    Name [maxHeaderNameLen]byte
+}
+
+func (v headerName) encode(w io.Writer) error {
+    if err := binary.Write(w, binary.BigEndian, v.Len); err != nil {
+        return err
+    }
+
+    _, err := w.Write(v.Name[:v.Len])
+    return err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go
new file mode 100644
index 00000000..e3fc0766
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go
@@ -0,0 +1,503 @@
+package eventstream
+
+import (
+    "encoding/base64"
+    "encoding/binary"
+    "fmt"
+    "io"
+    "strconv"
+    "time"
+)
+
+const maxHeaderValueLen = 1<<15 - 1 // 2^15-1 or 32KB - 1
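To sanity-check the header wire format above (one byte of name length, the name, one byte of value type, then the value), here is a minimal round trip. This is a sketch only, assuming it compiles inside the eventstream package, since encodeHeaders and decodeHeaders are unexported:

```go
package eventstream

import (
	"bytes"
	"testing"
)

func TestHeadersRoundTrip(t *testing.T) {
	in := Headers{}
	in.Set(":message-type", StringValue("event"))
	in.Set("count", Int32Value(42))

	var buf bytes.Buffer
	if err := encodeHeaders(&buf, in); err != nil {
		t.Fatal(err)
	}

	// decodeHeaders reads name/value pairs until EOF on the name length.
	out, err := decodeHeaders(&buf)
	if err != nil {
		t.Fatal(err)
	}

	if v, ok := out.Get("count").(Int32Value); !ok || v != 42 {
		t.Errorf("expected count=42, got %v", out.Get("count"))
	}
}
```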
+
+// valueType is the EventStream header value type.
+type valueType uint8
+
+// Header value types
+const (
+    trueValueType valueType = iota
+    falseValueType
+    int8ValueType  // Byte
+    int16ValueType // Short
+    int32ValueType // Integer
+    int64ValueType // Long
+    bytesValueType
+    stringValueType
+    timestampValueType
+    uuidValueType
+)
+
+func (t valueType) String() string {
+    switch t {
+    case trueValueType:
+        return "bool"
+    case falseValueType:
+        return "bool"
+    case int8ValueType:
+        return "int8"
+    case int16ValueType:
+        return "int16"
+    case int32ValueType:
+        return "int32"
+    case int64ValueType:
+        return "int64"
+    case bytesValueType:
+        return "byte_array"
+    case stringValueType:
+        return "string"
+    case timestampValueType:
+        return "timestamp"
+    case uuidValueType:
+        return "uuid"
+    default:
+        return fmt.Sprintf("unknown value type %d", uint8(t))
+    }
+}
+
+type rawValue struct {
+    Type  valueType
+    Len   uint16 // Only set for variable length slices
+    Value []byte // byte representation of value, BigEndian encoding.
+}
+
+func (r rawValue) encodeScalar(w io.Writer, v interface{}) error {
+    return binaryWriteFields(w, binary.BigEndian,
+        r.Type,
+        v,
+    )
+}
+
+func (r rawValue) encodeFixedSlice(w io.Writer, v []byte) error {
+    if err := binary.Write(w, binary.BigEndian, r.Type); err != nil {
+        return err
+    }
+
+    _, err := w.Write(v)
+    return err
+}
+
+func (r rawValue) encodeBytes(w io.Writer, v []byte) error {
+    if len(v) > maxHeaderValueLen {
+        return LengthError{
+            Part: "header value",
+            Want: maxHeaderValueLen, Have: len(v),
+            Value: v,
+        }
+    }
+    r.Len = uint16(len(v))
+
+    err := binaryWriteFields(w, binary.BigEndian,
+        r.Type,
+        r.Len,
+    )
+    if err != nil {
+        return err
+    }
+
+    _, err = w.Write(v)
+    return err
+}
+
+func (r rawValue) encodeString(w io.Writer, v string) error {
+    if len(v) > maxHeaderValueLen {
+        return LengthError{
+            Part: "header value",
+            Want: maxHeaderValueLen, Have: len(v),
+            Value: v,
+        }
+    }
+    r.Len = uint16(len(v))
+
+    type stringWriter interface {
+        WriteString(string) (int, error)
+    }
+
+    err := binaryWriteFields(w, binary.BigEndian,
+        r.Type,
+        r.Len,
+    )
+    if err != nil {
+        return err
+    }
+
+    if sw, ok := w.(stringWriter); ok {
+        _, err = sw.WriteString(v)
+    } else {
+        _, err = w.Write([]byte(v))
+    }
+
+    return err
+}
+
+func decodeFixedBytesValue(r io.Reader, buf []byte) error {
+    _, err := io.ReadFull(r, buf)
+    return err
+}
+
+func decodeBytesValue(r io.Reader) ([]byte, error) {
+    var raw rawValue
+    var err error
+    raw.Len, err = decodeUint16(r)
+    if err != nil {
+        return nil, err
+    }
+
+    buf := make([]byte, raw.Len)
+    _, err = io.ReadFull(r, buf)
+    if err != nil {
+        return nil, err
+    }
+
+    return buf, nil
+}
+
+func decodeStringValue(r io.Reader) (string, error) {
+    v, err := decodeBytesValue(r)
+    return string(v), err
+}
+
+// Value represents the abstract header value.
+type Value interface {
+    Get() interface{}
+    String() string
+    valueType() valueType
+    encode(io.Writer) error
+}
+
+// A BoolValue provides eventstream encoding and representation
+// of a Go bool value.
+type BoolValue bool
+
+// Get returns the underlying value.
+func (v BoolValue) Get() interface{} {
+    return bool(v)
+}
+
+// valueType returns the EventStream header value type value.
+func (v BoolValue) valueType() valueType {
+    if v {
+        return trueValueType
+    }
+    return falseValueType
+}
+
+func (v BoolValue) String() string {
+    return strconv.FormatBool(bool(v))
+}
+
+// encode encodes the BoolValue into an eventstream binary value
+// representation.
+func (v BoolValue) encode(w io.Writer) error {
+    return binary.Write(w, binary.BigEndian, v.valueType())
+}
+
+// An Int8Value provides eventstream encoding and representation of a Go
+// int8 value.
+type Int8Value int8
+
+// Get returns the underlying value.
+func (v Int8Value) Get() interface{} {
+    return int8(v)
+}
+
+// valueType returns the EventStream header value type value.
+func (Int8Value) valueType() valueType {
+    return int8ValueType
+}
+
+func (v Int8Value) String() string {
+    return fmt.Sprintf("0x%02x", int8(v))
+}
+
+// encode encodes the Int8Value into an eventstream binary value
+// representation.
+func (v Int8Value) encode(w io.Writer) error {
+    raw := rawValue{
+        Type: v.valueType(),
+    }
+
+    return raw.encodeScalar(w, v)
+}
+
+func (v *Int8Value) decode(r io.Reader) error {
+    n, err := decodeUint8(r)
+    if err != nil {
+        return err
+    }
+
+    *v = Int8Value(n)
+    return nil
+}
+
+// An Int16Value provides eventstream encoding and representation of a Go
+// int16 value.
+type Int16Value int16
+
+// Get returns the underlying value.
+func (v Int16Value) Get() interface{} {
+    return int16(v)
+}
+
+// valueType returns the EventStream header value type value.
+func (Int16Value) valueType() valueType {
+    return int16ValueType
+}
+
+func (v Int16Value) String() string {
+    return fmt.Sprintf("0x%04x", int16(v))
+}
+
+// encode encodes the Int16Value into an eventstream binary value
+// representation.
+func (v Int16Value) encode(w io.Writer) error {
+    raw := rawValue{
+        Type: v.valueType(),
+    }
+    return raw.encodeScalar(w, v)
+}
+
+func (v *Int16Value) decode(r io.Reader) error {
+    n, err := decodeUint16(r)
+    if err != nil {
+        return err
+    }
+
+    *v = Int16Value(n)
+    return nil
+}
+
+// An Int32Value provides eventstream encoding and representation of a Go
+// int32 value.
+type Int32Value int32
+
+// Get returns the underlying value.
+func (v Int32Value) Get() interface{} {
+    return int32(v)
+}
+
+// valueType returns the EventStream header value type value.
+func (Int32Value) valueType() valueType {
+    return int32ValueType
+}
+
+func (v Int32Value) String() string {
+    return fmt.Sprintf("0x%08x", int32(v))
+}
+
+// encode encodes the Int32Value into an eventstream binary value
+// representation.
+func (v Int32Value) encode(w io.Writer) error {
+    raw := rawValue{
+        Type: v.valueType(),
+    }
+    return raw.encodeScalar(w, v)
+}
+
+func (v *Int32Value) decode(r io.Reader) error {
+    n, err := decodeUint32(r)
+    if err != nil {
+        return err
+    }
+
+    *v = Int32Value(n)
+    return nil
+}
+
+// An Int64Value provides eventstream encoding and representation of a Go
+// int64 value.
+type Int64Value int64
+
+// Get returns the underlying value.
+func (v Int64Value) Get() interface{} {
+    return int64(v)
+}
+
+// valueType returns the EventStream header value type value.
+func (Int64Value) valueType() valueType {
+    return int64ValueType
+}
+
+func (v Int64Value) String() string {
+    return fmt.Sprintf("0x%016x", int64(v))
+}
+
+// encode encodes the Int64Value into an eventstream binary value
+// representation.
+func (v Int64Value) encode(w io.Writer) error {
+    raw := rawValue{
+        Type: v.valueType(),
+    }
+    return raw.encodeScalar(w, v)
+}
+
+func (v *Int64Value) decode(r io.Reader) error {
+    n, err := decodeUint64(r)
+    if err != nil {
+        return err
+    }
+
+    *v = Int64Value(n)
+    return nil
+}
+
+// A BytesValue provides eventstream encoding and representation of a Go
+// byte slice.
+type BytesValue []byte
+
+// Get returns the underlying value.
+func (v BytesValue) Get() interface{} {
+    return []byte(v)
+}
+
+// valueType returns the EventStream header value type value.
+func (BytesValue) valueType() valueType {
+    return bytesValueType
+}
+
+func (v BytesValue) String() string {
+    return base64.StdEncoding.EncodeToString([]byte(v))
+}
+
+// encode encodes the BytesValue into an eventstream binary value
+// representation.
+func (v BytesValue) encode(w io.Writer) error {
+    raw := rawValue{
+        Type: v.valueType(),
+    }
+
+    return raw.encodeBytes(w, []byte(v))
+}
+
+func (v *BytesValue) decode(r io.Reader) error {
+    buf, err := decodeBytesValue(r)
+    if err != nil {
+        return err
+    }
+
+    *v = BytesValue(buf)
+    return nil
+}
+
+// A StringValue provides eventstream encoding and representation of a Go
+// string.
+type StringValue string
+
+// Get returns the underlying value.
+func (v StringValue) Get() interface{} {
+    return string(v)
+}
+
+// valueType returns the EventStream header value type value.
+func (StringValue) valueType() valueType {
+    return stringValueType
+}
+
+func (v StringValue) String() string {
+    return string(v)
+}
+
+// encode encodes the StringValue into an eventstream binary value
+// representation.
+func (v StringValue) encode(w io.Writer) error {
+    raw := rawValue{
+        Type: v.valueType(),
+    }
+
+    return raw.encodeString(w, string(v))
+}
+
+func (v *StringValue) decode(r io.Reader) error {
+    s, err := decodeStringValue(r)
+    if err != nil {
+        return err
+    }
+
+    *v = StringValue(s)
+    return nil
+}
+
+// A TimestampValue provides eventstream encoding and representation of a Go
+// timestamp.
+type TimestampValue time.Time
+
+// Get returns the underlying value.
+func (v TimestampValue) Get() interface{} {
+    return time.Time(v)
+}
+
+// valueType returns the EventStream header value type value.
+func (TimestampValue) valueType() valueType {
+    return timestampValueType
+}
+
+func (v TimestampValue) epochMilli() int64 {
+    nano := time.Time(v).UnixNano()
+    msec := nano / int64(time.Millisecond)
+    return msec
+}
+
+func (v TimestampValue) String() string {
+    msec := v.epochMilli()
+    return strconv.FormatInt(msec, 10)
+}
+
+// encode encodes the TimestampValue into an eventstream binary value
+// representation.
+func (v TimestampValue) encode(w io.Writer) error {
+    raw := rawValue{
+        Type: v.valueType(),
+    }
+
+    msec := v.epochMilli()
+    return raw.encodeScalar(w, msec)
+}
+
+func (v *TimestampValue) decode(r io.Reader) error {
+    n, err := decodeUint64(r)
+    if err != nil {
+        return err
+    }
+
+    *v = TimestampValue(timeFromEpochMilli(int64(n)))
+    return nil
+}
+
+func timeFromEpochMilli(t int64) time.Time {
+    secs := t / 1e3
+    msec := t % 1e3
+    return time.Unix(secs, msec*int64(time.Millisecond)).UTC()
+}
+
+// A UUIDValue provides eventstream encoding and representation of a UUID
+// value.
+type UUIDValue [16]byte
+
+// Get returns the underlying value.
+func (v UUIDValue) Get() interface{} {
+    return v[:]
+}
+
+// valueType returns the EventStream header value type value.
+func (UUIDValue) valueType() valueType {
+    return uuidValueType
+}
+
+func (v UUIDValue) String() string {
+    return fmt.Sprintf(`%X-%X-%X-%X-%X`, v[0:4], v[4:6], v[6:8], v[8:10], v[10:])
+}
+
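One subtlety of the value types above: TimestampValue travels as epoch milliseconds (see epochMilli and timeFromEpochMilli), so sub-millisecond precision does not survive a round trip. A small illustration, using only the exported pieces:

```go
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/private/protocol/eventstream"
)

func main() {
	// 123456789ns of sub-second precision going in...
	t0 := time.Date(2017, 1, 18, 11, 7, 35, 123456789, time.UTC)

	v := eventstream.TimestampValue(t0)
	fmt.Println(v.String()) // the epoch-millisecond integer sent on the wire

	// Reconstruct the post-decode time the same way timeFromEpochMilli does:
	// whole seconds plus the leftover milliseconds.
	msec := t0.UnixNano() / int64(time.Millisecond)
	t1 := time.Unix(msec/1e3, (msec%1e3)*int64(time.Millisecond)).UTC()

	fmt.Println(t0.Sub(t1)) // 456.789µs lost to millisecond truncation
}
```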
+
+// encode encodes the UUIDValue into an eventstream binary value
+// representation.
+func (v UUIDValue) encode(w io.Writer) error {
+    raw := rawValue{
+        Type: v.valueType(),
+    }
+
+    return raw.encodeFixedSlice(w, v[:])
+}
+
+func (v *UUIDValue) decode(r io.Reader) error {
+    tv := (*v)[:]
+    return decodeFixedBytesValue(r, tv)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go
new file mode 100644
index 00000000..2dc012a6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go
@@ -0,0 +1,103 @@
+package eventstream
+
+import (
+    "bytes"
+    "encoding/binary"
+    "hash/crc32"
+)
+
+const preludeLen = 8
+const preludeCRCLen = 4
+const msgCRCLen = 4
+const minMsgLen = preludeLen + preludeCRCLen + msgCRCLen
+const maxPayloadLen = 1024 * 1024 * 16 // 16MB
+const maxHeadersLen = 1024 * 128      // 128KB
+const maxMsgLen = minMsgLen + maxHeadersLen + maxPayloadLen
+
+var crc32IEEETable = crc32.MakeTable(crc32.IEEE)
+
+// A Message provides the eventstream message representation.
+type Message struct {
+    Headers Headers
+    Payload []byte
+}
+
+func (m *Message) rawMessage() (rawMessage, error) {
+    var raw rawMessage
+
+    if len(m.Headers) > 0 {
+        var headers bytes.Buffer
+        if err := encodeHeaders(&headers, m.Headers); err != nil {
+            return rawMessage{}, err
+        }
+        raw.Headers = headers.Bytes()
+        raw.HeadersLen = uint32(len(raw.Headers))
+    }
+
+    raw.Length = raw.HeadersLen + uint32(len(m.Payload)) + minMsgLen
+
+    hash := crc32.New(crc32IEEETable)
+    binaryWriteFields(hash, binary.BigEndian, raw.Length, raw.HeadersLen)
+    raw.PreludeCRC = hash.Sum32()
+
+    binaryWriteFields(hash, binary.BigEndian, raw.PreludeCRC)
+
+    if raw.HeadersLen > 0 {
+        hash.Write(raw.Headers)
+    }
+
+    // Include the payload bytes in the message CRC as well.
+    if len(m.Payload) > 0 {
+        raw.Payload = m.Payload
+        hash.Write(raw.Payload)
+    }
+
+    raw.CRC = hash.Sum32()
+
+    return raw, nil
+}
+
+type messagePrelude struct {
+    Length     uint32
+    HeadersLen uint32
+    PreludeCRC uint32
+}
+
+func (p messagePrelude) PayloadLen() uint32 {
+    return p.Length - p.HeadersLen - minMsgLen
+}
+
+func (p messagePrelude) ValidateLens() error {
+    if p.Length == 0 || p.Length > maxMsgLen {
+        return LengthError{
+            Part: "message prelude",
+            Want: maxMsgLen,
+            Have: int(p.Length),
+        }
+    }
+    if p.HeadersLen > maxHeadersLen {
+        return LengthError{
+            Part: "message headers",
+            Want: maxHeadersLen,
+            Have: int(p.HeadersLen),
+        }
+    }
+    if payloadLen := p.PayloadLen(); payloadLen > maxPayloadLen {
+        return LengthError{
+            Part: "message payload",
+            Want: maxPayloadLen,
+            Have: int(payloadLen),
+        }
+    }
+
+    return nil
+}
+
+type rawMessage struct {
+    messagePrelude
+
+    Headers []byte
+    Payload []byte
+
+    CRC uint32
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go
new file mode 100644
index 00000000..e21614a1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go
@@ -0,0 +1,83 @@
+package protocol
+
+import (
+    "io"
+    "io/ioutil"
+    "net/http"
+
+    "github.com/aws/aws-sdk-go/aws"
+    "github.com/aws/aws-sdk-go/aws/client/metadata"
+    "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// PayloadUnmarshaler provides the interface for unmarshaling a payload's
+// reader into an SDK shape.
+type PayloadUnmarshaler interface {
+    UnmarshalPayload(io.Reader, interface{}) error
+}
+
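Stepping back to message.go above, the framing math is easy to check by hand. A sketch of the smallest possible frame, an empty message, built with nothing but the standard library (the offsets mirror rawMessage and messagePrelude):

```go
package main

import (
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

func main() {
	// Layout: total length (4) | headers length (4) | prelude CRC (4) |
	// headers | payload | message CRC (4). With no headers and no payload
	// the frame is exactly minMsgLen (16) bytes.
	table := crc32.MakeTable(crc32.IEEE)
	frame := make([]byte, 16)

	binary.BigEndian.PutUint32(frame[0:4], 16) // total length includes both CRCs
	binary.BigEndian.PutUint32(frame[4:8], 0)  // headers length

	// The prelude CRC covers only the two length fields.
	binary.BigEndian.PutUint32(frame[8:12], crc32.Checksum(frame[0:8], table))

	// The message CRC covers everything before it, prelude CRC included.
	binary.BigEndian.PutUint32(frame[12:16], crc32.Checksum(frame[0:12], table))

	fmt.Printf("% x\n", frame)
}
```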
+// HandlerPayloadUnmarshal implements the PayloadUnmarshaler from a
+// HandlerList. This provides the support for unmarshaling a payload reader to
+// a shape without needing an SDK request first.
+type HandlerPayloadUnmarshal struct {
+    Unmarshalers request.HandlerList
+}
+
+// UnmarshalPayload unmarshals the io.Reader payload into the SDK shape using
+// the Unmarshalers HandlerList provided. Returns an error if the
+// unmarshaling fails.
+func (h HandlerPayloadUnmarshal) UnmarshalPayload(r io.Reader, v interface{}) error {
+    req := &request.Request{
+        HTTPRequest: &http.Request{},
+        HTTPResponse: &http.Response{
+            StatusCode: 200,
+            Header:     http.Header{},
+            Body:       ioutil.NopCloser(r),
+        },
+        Data: v,
+    }
+
+    h.Unmarshalers.Run(req)
+
+    return req.Error
+}
+
+// PayloadMarshaler provides the interface for marshaling an SDK shape into an
+// io.Writer.
+type PayloadMarshaler interface {
+    MarshalPayload(io.Writer, interface{}) error
+}
+
+// HandlerPayloadMarshal implements the PayloadMarshaler from a HandlerList.
+// This provides support for marshaling an SDK shape into an io.Writer without
+// needing an SDK request first.
+type HandlerPayloadMarshal struct {
+    Marshalers request.HandlerList
+}
+
+// MarshalPayload marshals the SDK shape into the io.Writer using the
+// Marshalers HandlerList provided. Returns an error if the marshaling
+// fails.
+func (h HandlerPayloadMarshal) MarshalPayload(w io.Writer, v interface{}) error {
+    req := request.New(
+        aws.Config{},
+        metadata.ClientInfo{},
+        request.Handlers{},
+        nil,
+        &request.Operation{HTTPMethod: "GET"},
+        v,
+        nil,
+    )
+
+    h.Marshalers.Run(req)
+
+    if req.Error != nil {
+        return req.Error
+    }
+
+    if _, err := io.Copy(w, req.GetBody()); err != nil {
+        return err
+    }
+
+    return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
index 18169f0f..60e5b09d 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
@@ -25,7 +25,7 @@ func Build(r *request.Request) {
 		return
 	}
 
-	if r.ExpireTime == 0 {
+	if !r.IsPresigned() {
 		r.HTTPRequest.Method = "POST"
 		r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
 		r.SetBufferBody([]byte(body.Encode()))
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
index 5ce9cba3..75866d01 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
@@ -233,7 +233,12 @@ func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, ta
 		v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32))
 	case time.Time:
 		const ISO8601UTC = "2006-01-02T15:04:05Z"
-		v.Set(name, value.UTC().Format(ISO8601UTC))
+		format := tag.Get("timestampFormat")
+		if len(format) == 0 {
+			format = protocol.ISO8601TimeFormatName
+		}
+
+		v.Set(name, protocol.FormatTime(format, value))
 	default:
 		return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name())
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
index c405288d..b34f5258 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
@@ -20,9 +20,6 @@ import (
 	"github.com/aws/aws-sdk-go/private/protocol"
 )
 
-// RFC822
returns an RFC822 formatted timestamp for AWS protocols
-const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT"
-
 // Whether the byte value can be sent without escaping in AWS URLs
 var noEscape [256]bool
 
@@ -270,7 +267,14 @@ func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error)
 	case float64:
 		str = strconv.FormatFloat(value, 'f', -1, 64)
 	case time.Time:
-		str = value.UTC().Format(RFC822)
+		format := tag.Get("timestampFormat")
+		if len(format) == 0 {
+			format = protocol.RFC822TimeFormatName
+			if tag.Get("location") == "querystring" {
+				format = protocol.ISO8601TimeFormatName
+			}
+		}
+		str = protocol.FormatTime(format, value)
 	case aws.JSONValue:
 		if len(value) == 0 {
 			return "", errValueNotSet
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
index 823f045e..33fd53b1 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
@@ -198,7 +198,11 @@ func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) erro
 		}
 		v.Set(reflect.ValueOf(&f))
 	case *time.Time:
-		t, err := time.Parse(RFC822, header)
+		format := tag.Get("timestampFormat")
+		if len(format) == 0 {
+			format = protocol.RFC822TimeFormatName
+		}
+		t, err := protocol.ParseTime(format, header)
 		if err != nil {
 			return err
 		}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go
new file mode 100644
index 00000000..b7ed6c6f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go
@@ -0,0 +1,72 @@
+package protocol
+
+import (
+    "strconv"
+    "time"
+)
+
+// Names of time formats supported by the SDK
+const (
+    RFC822TimeFormatName  = "rfc822"
+    ISO8601TimeFormatName = "iso8601"
+    UnixTimeFormatName    = "unixTimestamp"
+)
+
+// Time formats supported by the SDK
+const (
+    // RFC 7231#section-7.1.1.1 timestamp format. e.g. Tue, 29 Apr 2014 18:30:38 GMT
+    RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT"
+
+    // RFC3339, a subset of the ISO8601 timestamp format. e.g. 2014-04-29T18:30:38Z
+    ISO8601TimeFormat = "2006-01-02T15:04:05Z"
+)
+
+// IsKnownTimestampFormat returns whether the timestamp format name
+// is known to the SDK's protocols.
+func IsKnownTimestampFormat(name string) bool {
+    switch name {
+    case RFC822TimeFormatName:
+        fallthrough
+    case ISO8601TimeFormatName:
+        fallthrough
+    case UnixTimeFormatName:
+        return true
+    default:
+        return false
+    }
+}
+
+// FormatTime returns a string value of the time.
+func FormatTime(name string, t time.Time) string {
+    t = t.UTC()
+
+    switch name {
+    case RFC822TimeFormatName:
+        return t.Format(RFC822TimeFormat)
+    case ISO8601TimeFormatName:
+        return t.Format(ISO8601TimeFormat)
+    case UnixTimeFormatName:
+        return strconv.FormatInt(t.Unix(), 10)
+    default:
+        panic("unknown timestamp format name, " + name)
+    }
+}
+
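A quick round trip through FormatTime above and ParseTime just below; note the unixTimestamp path formats whole seconds with FormatInt and parses back through ParseFloat, so fractional seconds are dropped either way in this version:

```go
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/private/protocol"
)

func main() {
	t := time.Date(2014, 4, 29, 18, 30, 38, 0, time.UTC)

	for _, name := range []string{
		protocol.RFC822TimeFormatName,
		protocol.ISO8601TimeFormatName,
		protocol.UnixTimeFormatName,
	} {
		s := protocol.FormatTime(name, t)
		back, err := protocol.ParseTime(name, s)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%-13s %-31s equal=%v\n", name, s, back.Equal(t))
	}
}
```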
+// ParseTime attempts to parse the time given the format. Returns
+// the parsed time if successful, and an error otherwise.
+func ParseTime(formatName, value string) (time.Time, error) {
+    switch formatName {
+    case RFC822TimeFormatName:
+        return time.Parse(RFC822TimeFormat, value)
+    case ISO8601TimeFormatName:
+        return time.Parse(ISO8601TimeFormat, value)
+    case UnixTimeFormatName:
+        v, err := strconv.ParseFloat(value, 64)
+        if err != nil {
+            return time.Time{}, err
+        }
+        return time.Unix(int64(v), 0), nil
+    default:
+        panic("unknown timestamp format name, " + formatName)
+    }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
index 7091b456..1bfe45f6 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
@@ -13,9 +13,13 @@ import (
 	"github.com/aws/aws-sdk-go/private/protocol"
 )
 
-// BuildXML will serialize params into an xml.Encoder.
-// Error will be returned if the serialization of any of the params or nested values fails.
+// BuildXML will serialize params into an xml.Encoder. Error will be returned
+// if the serialization of any of the params or nested values fails.
 func BuildXML(params interface{}, e *xml.Encoder) error {
+	return buildXML(params, e, false)
+}
+
+func buildXML(params interface{}, e *xml.Encoder, sorted bool) error {
 	b := xmlBuilder{encoder: e, namespaces: map[string]string{}}
 	root := NewXMLElement(xml.Name{})
 	if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil {
@@ -23,7 +27,7 @@ func BuildXML(params interface{}, e *xml.Encoder) error {
 	}
 	for _, c := range root.Children {
 		for _, v := range c {
-			return StructToXML(e, v, false)
+			return StructToXML(e, v, sorted)
 		}
 	}
 	return nil
@@ -90,8 +94,6 @@ func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag refl
 		return nil
 	}
 
-	fieldAdded := false
-
 	// unwrap payloads
 	if payload := tag.Get("payload"); payload != "" {
 		field, _ := value.Type().FieldByName(payload)
@@ -119,6 +121,8 @@ func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag refl
 		child.Attr = append(child.Attr, ns)
 	}
 
+	var payloadFields, nonPayloadFields int
+
 	t := value.Type()
 	for i := 0; i < value.NumField(); i++ {
 		member := elemOf(value.Field(i))
@@ -133,8 +137,10 @@ func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag refl
 		mTag := field.Tag
 		if mTag.Get("location") != "" { // skip non-body members
+			nonPayloadFields++
 			continue
 		}
+		payloadFields++
 
 		if protocol.CanSetIdempotencyToken(value.Field(i), field) {
 			token := protocol.GetIdempotencyToken()
@@ -149,11 +155,11 @@ func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag refl
 		if err := b.buildValue(member, child, mTag); err != nil {
 			return err
 		}
-
-		fieldAdded = true
 	}
 
-	if fieldAdded { // only append this child if we have one ore more valid members
+	// Only case where the child shape is not added is if the shape only contains
+	// non-payload fields, e.g. headers/query.
+ if !(payloadFields == 0 && nonPayloadFields > 0) { current.AddChild(child) } @@ -278,8 +284,12 @@ func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag refl case float32: str = strconv.FormatFloat(float64(converted), 'f', -1, 32) case time.Time: - const ISO8601UTC = "2006-01-02T15:04:05Z" - str = converted.UTC().Format(ISO8601UTC) + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + str = protocol.FormatTime(format, converted) default: return fmt.Errorf("unsupported value for param %s: %v (%s)", tag.Get("locationName"), value.Interface(), value.Type().Name()) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go index 87584628..ff1ef683 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go @@ -9,6 +9,8 @@ import ( "strconv" "strings" "time" + + "github.com/aws/aws-sdk-go/private/protocol" ) // UnmarshalXML deserializes an xml.Decoder into the container v. V @@ -52,9 +54,15 @@ func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { if t == "" { switch rtype.Kind() { case reflect.Struct: - t = "structure" + // also it can't be a time object + if _, ok := r.Interface().(*time.Time); !ok { + t = "structure" + } case reflect.Slice: - t = "list" + // also it can't be a byte slice + if _, ok := r.Interface().([]byte); !ok { + t = "list" + } case reflect.Map: t = "map" } @@ -247,8 +255,12 @@ func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { } r.Set(reflect.ValueOf(&v)) case *time.Time: - const ISO8601UTC = "2006-01-02T15:04:05Z" - t, err := time.Parse(ISO8601UTC, node.Text) + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + t, err := protocol.ParseTime(format, node.Text) if err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go index 3e970b62..515ce152 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go @@ -29,6 +29,7 @@ func NewXMLElement(name xml.Name) *XMLNode { // AddChild adds child to the XMLNode. 
func (n *XMLNode) AddChild(child *XMLNode) { + child.parent = n if _, ok := n.Children[child.Name.Local]; !ok { n.Children[child.Name.Local] = []*XMLNode{} } diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go index 564a3965..0e999ca3 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -3,14 +3,22 @@ package s3 import ( + "bytes" "fmt" "io" + "sync" + "sync/atomic" "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/eventstream" + "github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi" + "github.com/aws/aws-sdk-go/private/protocol/rest" "github.com/aws/aws-sdk-go/private/protocol/restxml" ) @@ -18,7 +26,7 @@ const opAbortMultipartUpload = "AbortMultipartUpload" // AbortMultipartUploadRequest generates a "aws/request.Request" representing the // client's request for the AbortMultipartUpload operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -39,7 +47,7 @@ const opAbortMultipartUpload = "AbortMultipartUpload" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req *request.Request, output *AbortMultipartUploadOutput) { op := &request.Operation{ Name: opAbortMultipartUpload, @@ -75,7 +83,7 @@ func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req // * ErrCodeNoSuchUpload "NoSuchUpload" // The specified multipart upload does not exist. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload func (c *S3) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) { req, out := c.AbortMultipartUploadRequest(input) return out, req.Send() @@ -101,7 +109,7 @@ const opCompleteMultipartUpload = "CompleteMultipartUpload" // CompleteMultipartUploadRequest generates a "aws/request.Request" representing the // client's request for the CompleteMultipartUpload operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
@@ -122,7 +130,7 @@ const opCompleteMultipartUpload = "CompleteMultipartUpload" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) (req *request.Request, output *CompleteMultipartUploadOutput) { op := &request.Operation{ Name: opCompleteMultipartUpload, @@ -149,7 +157,7 @@ func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation CompleteMultipartUpload for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload func (c *S3) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) { req, out := c.CompleteMultipartUploadRequest(input) return out, req.Send() @@ -175,7 +183,7 @@ const opCopyObject = "CopyObject" // CopyObjectRequest generates a "aws/request.Request" representing the // client's request for the CopyObject operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -196,7 +204,7 @@ const opCopyObject = "CopyObject" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, output *CopyObjectOutput) { op := &request.Operation{ Name: opCopyObject, @@ -229,7 +237,7 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // The source object of the COPY operation is not in the active tier and is // only stored in Amazon Glacier. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) { req, out := c.CopyObjectRequest(input) return out, req.Send() @@ -255,7 +263,7 @@ const opCreateBucket = "CreateBucket" // CreateBucketRequest generates a "aws/request.Request" representing the // client's request for the CreateBucket operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
@@ -276,7 +284,7 @@ const opCreateBucket = "CreateBucket" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request, output *CreateBucketOutput) { op := &request.Operation{ Name: opCreateBucket, @@ -311,7 +319,7 @@ func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request // // * ErrCodeBucketAlreadyOwnedByYou "BucketAlreadyOwnedByYou" // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) { req, out := c.CreateBucketRequest(input) return out, req.Send() @@ -337,7 +345,7 @@ const opCreateMultipartUpload = "CreateMultipartUpload" // CreateMultipartUploadRequest generates a "aws/request.Request" representing the // client's request for the CreateMultipartUpload operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -358,7 +366,7 @@ const opCreateMultipartUpload = "CreateMultipartUpload" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (req *request.Request, output *CreateMultipartUploadOutput) { op := &request.Operation{ Name: opCreateMultipartUpload, @@ -391,7 +399,7 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation CreateMultipartUpload for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload func (c *S3) CreateMultipartUpload(input *CreateMultipartUploadInput) (*CreateMultipartUploadOutput, error) { req, out := c.CreateMultipartUploadRequest(input) return out, req.Send() @@ -417,7 +425,7 @@ const opDeleteBucket = "DeleteBucket" // DeleteBucketRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucket operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
@@ -438,7 +446,7 @@ const opDeleteBucket = "DeleteBucket" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request, output *DeleteBucketOutput) { op := &request.Operation{ Name: opDeleteBucket, @@ -468,7 +476,7 @@ func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteBucket for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket func (c *S3) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) { req, out := c.DeleteBucketRequest(input) return out, req.Send() @@ -494,7 +502,7 @@ const opDeleteBucketAnalyticsConfiguration = "DeleteBucketAnalyticsConfiguration // DeleteBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucketAnalyticsConfiguration operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -515,7 +523,7 @@ const opDeleteBucketAnalyticsConfiguration = "DeleteBucketAnalyticsConfiguration // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyticsConfigurationInput) (req *request.Request, output *DeleteBucketAnalyticsConfigurationOutput) { op := &request.Operation{ Name: opDeleteBucketAnalyticsConfiguration, @@ -545,7 +553,7 @@ func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyt // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteBucketAnalyticsConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration func (c *S3) DeleteBucketAnalyticsConfiguration(input *DeleteBucketAnalyticsConfigurationInput) (*DeleteBucketAnalyticsConfigurationOutput, error) { req, out := c.DeleteBucketAnalyticsConfigurationRequest(input) return out, req.Send() @@ -571,7 +579,7 @@ const opDeleteBucketCors = "DeleteBucketCors" // DeleteBucketCorsRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucketCors operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
@@ -592,7 +600,7 @@ const opDeleteBucketCors = "DeleteBucketCors" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request.Request, output *DeleteBucketCorsOutput) { op := &request.Operation{ Name: opDeleteBucketCors, @@ -621,7 +629,7 @@ func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteBucketCors for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors func (c *S3) DeleteBucketCors(input *DeleteBucketCorsInput) (*DeleteBucketCorsOutput, error) { req, out := c.DeleteBucketCorsRequest(input) return out, req.Send() @@ -647,7 +655,7 @@ const opDeleteBucketEncryption = "DeleteBucketEncryption" // DeleteBucketEncryptionRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucketEncryption operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -668,7 +676,7 @@ const opDeleteBucketEncryption = "DeleteBucketEncryption" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryption +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryption func (c *S3) DeleteBucketEncryptionRequest(input *DeleteBucketEncryptionInput) (req *request.Request, output *DeleteBucketEncryptionOutput) { op := &request.Operation{ Name: opDeleteBucketEncryption, @@ -697,7 +705,7 @@ func (c *S3) DeleteBucketEncryptionRequest(input *DeleteBucketEncryptionInput) ( // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteBucketEncryption for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryption +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryption func (c *S3) DeleteBucketEncryption(input *DeleteBucketEncryptionInput) (*DeleteBucketEncryptionOutput, error) { req, out := c.DeleteBucketEncryptionRequest(input) return out, req.Send() @@ -723,7 +731,7 @@ const opDeleteBucketInventoryConfiguration = "DeleteBucketInventoryConfiguration // DeleteBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucketInventoryConfiguration operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
@@ -744,7 +752,7 @@ const opDeleteBucketInventoryConfiguration = "DeleteBucketInventoryConfiguration // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInventoryConfigurationInput) (req *request.Request, output *DeleteBucketInventoryConfigurationOutput) { op := &request.Operation{ Name: opDeleteBucketInventoryConfiguration, @@ -774,7 +782,7 @@ func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInvent // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteBucketInventoryConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration func (c *S3) DeleteBucketInventoryConfiguration(input *DeleteBucketInventoryConfigurationInput) (*DeleteBucketInventoryConfigurationOutput, error) { req, out := c.DeleteBucketInventoryConfigurationRequest(input) return out, req.Send() @@ -800,7 +808,7 @@ const opDeleteBucketLifecycle = "DeleteBucketLifecycle" // DeleteBucketLifecycleRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucketLifecycle operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -821,7 +829,7 @@ const opDeleteBucketLifecycle = "DeleteBucketLifecycle" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (req *request.Request, output *DeleteBucketLifecycleOutput) { op := &request.Operation{ Name: opDeleteBucketLifecycle, @@ -850,7 +858,7 @@ func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (re // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteBucketLifecycle for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBucketLifecycleOutput, error) { req, out := c.DeleteBucketLifecycleRequest(input) return out, req.Send() @@ -876,7 +884,7 @@ const opDeleteBucketMetricsConfiguration = "DeleteBucketMetricsConfiguration" // DeleteBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucketMetricsConfiguration operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
@@ -897,7 +905,7 @@ const opDeleteBucketMetricsConfiguration = "DeleteBucketMetricsConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsConfigurationInput) (req *request.Request, output *DeleteBucketMetricsConfigurationOutput) { op := &request.Operation{ Name: opDeleteBucketMetricsConfiguration, @@ -927,7 +935,7 @@ func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsC // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteBucketMetricsConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration func (c *S3) DeleteBucketMetricsConfiguration(input *DeleteBucketMetricsConfigurationInput) (*DeleteBucketMetricsConfigurationOutput, error) { req, out := c.DeleteBucketMetricsConfigurationRequest(input) return out, req.Send() @@ -953,7 +961,7 @@ const opDeleteBucketPolicy = "DeleteBucketPolicy" // DeleteBucketPolicyRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucketPolicy operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -974,7 +982,7 @@ const opDeleteBucketPolicy = "DeleteBucketPolicy" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *request.Request, output *DeleteBucketPolicyOutput) { op := &request.Operation{ Name: opDeleteBucketPolicy, @@ -1003,7 +1011,7 @@ func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *req // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteBucketPolicy for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy func (c *S3) DeleteBucketPolicy(input *DeleteBucketPolicyInput) (*DeleteBucketPolicyOutput, error) { req, out := c.DeleteBucketPolicyRequest(input) return out, req.Send() @@ -1029,7 +1037,7 @@ const opDeleteBucketReplication = "DeleteBucketReplication" // DeleteBucketReplicationRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucketReplication operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
@@ -1050,7 +1058,7 @@ const opDeleteBucketReplication = "DeleteBucketReplication" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) (req *request.Request, output *DeleteBucketReplicationOutput) { op := &request.Operation{ Name: opDeleteBucketReplication, @@ -1079,7 +1087,7 @@ func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteBucketReplication for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) { req, out := c.DeleteBucketReplicationRequest(input) return out, req.Send() @@ -1105,7 +1113,7 @@ const opDeleteBucketTagging = "DeleteBucketTagging" // DeleteBucketTaggingRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucketTagging operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -1126,7 +1134,7 @@ const opDeleteBucketTagging = "DeleteBucketTagging" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *request.Request, output *DeleteBucketTaggingOutput) { op := &request.Operation{ Name: opDeleteBucketTagging, @@ -1155,7 +1163,7 @@ func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *r // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteBucketTagging for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging func (c *S3) DeleteBucketTagging(input *DeleteBucketTaggingInput) (*DeleteBucketTaggingOutput, error) { req, out := c.DeleteBucketTaggingRequest(input) return out, req.Send() @@ -1181,7 +1189,7 @@ const opDeleteBucketWebsite = "DeleteBucketWebsite" // DeleteBucketWebsiteRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucketWebsite operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
@@ -1202,7 +1210,7 @@ const opDeleteBucketWebsite = "DeleteBucketWebsite" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *request.Request, output *DeleteBucketWebsiteOutput) { op := &request.Operation{ Name: opDeleteBucketWebsite, @@ -1231,7 +1239,7 @@ func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *r // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteBucketWebsite for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite func (c *S3) DeleteBucketWebsite(input *DeleteBucketWebsiteInput) (*DeleteBucketWebsiteOutput, error) { req, out := c.DeleteBucketWebsiteRequest(input) return out, req.Send() @@ -1257,7 +1265,7 @@ const opDeleteObject = "DeleteObject" // DeleteObjectRequest generates a "aws/request.Request" representing the // client's request for the DeleteObject operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -1278,7 +1286,7 @@ const opDeleteObject = "DeleteObject" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request, output *DeleteObjectOutput) { op := &request.Operation{ Name: opDeleteObject, @@ -1307,7 +1315,7 @@ func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteObject for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject func (c *S3) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) { req, out := c.DeleteObjectRequest(input) return out, req.Send() @@ -1333,7 +1341,7 @@ const opDeleteObjectTagging = "DeleteObjectTagging" // DeleteObjectTaggingRequest generates a "aws/request.Request" representing the // client's request for the DeleteObjectTagging operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
@@ -1354,7 +1362,7 @@ const opDeleteObjectTagging = "DeleteObjectTagging" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *request.Request, output *DeleteObjectTaggingOutput) { op := &request.Operation{ Name: opDeleteObjectTagging, @@ -1381,7 +1389,7 @@ func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *r // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteObjectTagging for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging func (c *S3) DeleteObjectTagging(input *DeleteObjectTaggingInput) (*DeleteObjectTaggingOutput, error) { req, out := c.DeleteObjectTaggingRequest(input) return out, req.Send() @@ -1407,7 +1415,7 @@ const opDeleteObjects = "DeleteObjects" // DeleteObjectsRequest generates a "aws/request.Request" representing the // client's request for the DeleteObjects operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -1428,7 +1436,7 @@ const opDeleteObjects = "DeleteObjects" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Request, output *DeleteObjectsOutput) { op := &request.Operation{ Name: opDeleteObjects, @@ -1456,7 +1464,7 @@ func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Reque // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteObjects for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects func (c *S3) DeleteObjects(input *DeleteObjectsInput) (*DeleteObjectsOutput, error) { req, out := c.DeleteObjectsRequest(input) return out, req.Send() @@ -1482,7 +1490,7 @@ const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration" // GetBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketAccelerateConfiguration operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
@@ -1503,7 +1511,7 @@ const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateConfigurationInput) (req *request.Request, output *GetBucketAccelerateConfigurationOutput) { op := &request.Operation{ Name: opGetBucketAccelerateConfiguration, @@ -1530,7 +1538,7 @@ func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateC // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketAccelerateConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration func (c *S3) GetBucketAccelerateConfiguration(input *GetBucketAccelerateConfigurationInput) (*GetBucketAccelerateConfigurationOutput, error) { req, out := c.GetBucketAccelerateConfigurationRequest(input) return out, req.Send() @@ -1556,7 +1564,7 @@ const opGetBucketAcl = "GetBucketAcl" // GetBucketAclRequest generates a "aws/request.Request" representing the // client's request for the GetBucketAcl operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -1577,7 +1585,7 @@ const opGetBucketAcl = "GetBucketAcl" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request, output *GetBucketAclOutput) { op := &request.Operation{ Name: opGetBucketAcl, @@ -1604,7 +1612,7 @@ func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketAcl for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl func (c *S3) GetBucketAcl(input *GetBucketAclInput) (*GetBucketAclOutput, error) { req, out := c.GetBucketAclRequest(input) return out, req.Send() @@ -1630,7 +1638,7 @@ const opGetBucketAnalyticsConfiguration = "GetBucketAnalyticsConfiguration" // GetBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketAnalyticsConfiguration operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
@@ -1651,7 +1659,7 @@ const opGetBucketAnalyticsConfiguration = "GetBucketAnalyticsConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsConfigurationInput) (req *request.Request, output *GetBucketAnalyticsConfigurationOutput) { op := &request.Operation{ Name: opGetBucketAnalyticsConfiguration, @@ -1679,7 +1687,7 @@ func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsCon // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketAnalyticsConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration func (c *S3) GetBucketAnalyticsConfiguration(input *GetBucketAnalyticsConfigurationInput) (*GetBucketAnalyticsConfigurationOutput, error) { req, out := c.GetBucketAnalyticsConfigurationRequest(input) return out, req.Send() @@ -1705,7 +1713,7 @@ const opGetBucketCors = "GetBucketCors" // GetBucketCorsRequest generates a "aws/request.Request" representing the // client's request for the GetBucketCors operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -1726,7 +1734,7 @@ const opGetBucketCors = "GetBucketCors" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Request, output *GetBucketCorsOutput) { op := &request.Operation{ Name: opGetBucketCors, @@ -1753,7 +1761,7 @@ func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Reque // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketCors for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors func (c *S3) GetBucketCors(input *GetBucketCorsInput) (*GetBucketCorsOutput, error) { req, out := c.GetBucketCorsRequest(input) return out, req.Send() @@ -1779,7 +1787,7 @@ const opGetBucketEncryption = "GetBucketEncryption" // GetBucketEncryptionRequest generates a "aws/request.Request" representing the // client's request for the GetBucketEncryption operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
@@ -1800,7 +1808,7 @@ const opGetBucketEncryption = "GetBucketEncryption" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryption +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryption func (c *S3) GetBucketEncryptionRequest(input *GetBucketEncryptionInput) (req *request.Request, output *GetBucketEncryptionOutput) { op := &request.Operation{ Name: opGetBucketEncryption, @@ -1827,7 +1835,7 @@ func (c *S3) GetBucketEncryptionRequest(input *GetBucketEncryptionInput) (req *r // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketEncryption for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryption +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryption func (c *S3) GetBucketEncryption(input *GetBucketEncryptionInput) (*GetBucketEncryptionOutput, error) { req, out := c.GetBucketEncryptionRequest(input) return out, req.Send() @@ -1853,7 +1861,7 @@ const opGetBucketInventoryConfiguration = "GetBucketInventoryConfiguration" // GetBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketInventoryConfiguration operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -1874,7 +1882,7 @@ const opGetBucketInventoryConfiguration = "GetBucketInventoryConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryConfigurationInput) (req *request.Request, output *GetBucketInventoryConfigurationOutput) { op := &request.Operation{ Name: opGetBucketInventoryConfiguration, @@ -1902,7 +1910,7 @@ func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryCon // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketInventoryConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration func (c *S3) GetBucketInventoryConfiguration(input *GetBucketInventoryConfigurationInput) (*GetBucketInventoryConfigurationOutput, error) { req, out := c.GetBucketInventoryConfigurationRequest(input) return out, req.Send() @@ -1928,7 +1936,7 @@ const opGetBucketLifecycle = "GetBucketLifecycle" // GetBucketLifecycleRequest generates a "aws/request.Request" representing the // client's request for the GetBucketLifecycle operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
@@ -1949,7 +1957,7 @@ const opGetBucketLifecycle = "GetBucketLifecycle" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *request.Request, output *GetBucketLifecycleOutput) { if c.Client.Config.Logger != nil { c.Client.Config.Logger.Log("This operation, GetBucketLifecycle, has been deprecated") @@ -1979,7 +1987,7 @@ func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *req // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketLifecycle for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifecycleOutput, error) { req, out := c.GetBucketLifecycleRequest(input) return out, req.Send() @@ -2005,7 +2013,7 @@ const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration" // GetBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketLifecycleConfiguration operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -2026,7 +2034,7 @@ const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleConfigurationInput) (req *request.Request, output *GetBucketLifecycleConfigurationOutput) { op := &request.Operation{ Name: opGetBucketLifecycleConfiguration, @@ -2053,7 +2061,7 @@ func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleCon // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketLifecycleConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration func (c *S3) GetBucketLifecycleConfiguration(input *GetBucketLifecycleConfigurationInput) (*GetBucketLifecycleConfigurationOutput, error) { req, out := c.GetBucketLifecycleConfigurationRequest(input) return out, req.Send() @@ -2079,7 +2087,7 @@ const opGetBucketLocation = "GetBucketLocation" // GetBucketLocationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketLocation operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
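[Editor's note: one hunk above shows `GetBucketLifecycleRequest` logging a deprecation notice whenever `c.Client.Config.Logger` is non-nil. Below is a short sketch of wiring up such a logger so those notices actually surface; the logger choice, region, and bucket name are assumptions for illustration only.]

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Route SDK log output through the standard library logger so the
	// deprecation message emitted by GetBucketLifecycleRequest is visible.
	cfg := aws.NewConfig().
		WithRegion("us-east-1"). // placeholder region
		WithLogger(aws.LoggerFunc(func(args ...interface{}) {
			log.Println(args...)
		}))

	svc := s3.New(session.Must(session.NewSession(cfg)))

	// Per the generated code above, merely building the request triggers
	// the "has been deprecated" log line; GetBucketLifecycleConfiguration
	// is the non-deprecated replacement.
	req, _ := svc.GetBucketLifecycleRequest(&s3.GetBucketLifecycleInput{
		Bucket: aws.String("example-bucket"), // placeholder bucket
	})
	_ = req
}
```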
@@ -2100,7 +2108,7 @@ const opGetBucketLocation = "GetBucketLocation" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *request.Request, output *GetBucketLocationOutput) { op := &request.Operation{ Name: opGetBucketLocation, @@ -2127,7 +2135,7 @@ func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *reque // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketLocation for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocationOutput, error) { req, out := c.GetBucketLocationRequest(input) return out, req.Send() @@ -2153,7 +2161,7 @@ const opGetBucketLogging = "GetBucketLogging" // GetBucketLoggingRequest generates a "aws/request.Request" representing the // client's request for the GetBucketLogging operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -2174,7 +2182,7 @@ const opGetBucketLogging = "GetBucketLogging" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request.Request, output *GetBucketLoggingOutput) { op := &request.Operation{ Name: opGetBucketLogging, @@ -2202,7 +2210,7 @@ func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketLogging for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging func (c *S3) GetBucketLogging(input *GetBucketLoggingInput) (*GetBucketLoggingOutput, error) { req, out := c.GetBucketLoggingRequest(input) return out, req.Send() @@ -2228,7 +2236,7 @@ const opGetBucketMetricsConfiguration = "GetBucketMetricsConfiguration" // GetBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketMetricsConfiguration operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
@@ -2249,7 +2257,7 @@ const opGetBucketMetricsConfiguration = "GetBucketMetricsConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigurationInput) (req *request.Request, output *GetBucketMetricsConfigurationOutput) { op := &request.Operation{ Name: opGetBucketMetricsConfiguration, @@ -2277,7 +2285,7 @@ func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigu // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketMetricsConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration func (c *S3) GetBucketMetricsConfiguration(input *GetBucketMetricsConfigurationInput) (*GetBucketMetricsConfigurationOutput, error) { req, out := c.GetBucketMetricsConfigurationRequest(input) return out, req.Send() @@ -2303,7 +2311,7 @@ const opGetBucketNotification = "GetBucketNotification" // GetBucketNotificationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketNotification operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -2324,7 +2332,7 @@ const opGetBucketNotification = "GetBucketNotification" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfigurationDeprecated) { if c.Client.Config.Logger != nil { c.Client.Config.Logger.Log("This operation, GetBucketNotification, has been deprecated") @@ -2354,7 +2362,7 @@ func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurat // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketNotification for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequest) (*NotificationConfigurationDeprecated, error) { req, out := c.GetBucketNotificationRequest(input) return out, req.Send() @@ -2380,7 +2388,7 @@ const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration // GetBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketNotificationConfiguration operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
@@ -2401,7 +2409,7 @@ const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfiguration) { op := &request.Operation{ Name: opGetBucketNotificationConfiguration, @@ -2428,7 +2436,7 @@ func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificat // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketNotificationConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration func (c *S3) GetBucketNotificationConfiguration(input *GetBucketNotificationConfigurationRequest) (*NotificationConfiguration, error) { req, out := c.GetBucketNotificationConfigurationRequest(input) return out, req.Send() @@ -2454,7 +2462,7 @@ const opGetBucketPolicy = "GetBucketPolicy" // GetBucketPolicyRequest generates a "aws/request.Request" representing the // client's request for the GetBucketPolicy operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -2475,7 +2483,7 @@ const opGetBucketPolicy = "GetBucketPolicy" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.Request, output *GetBucketPolicyOutput) { op := &request.Operation{ Name: opGetBucketPolicy, @@ -2502,7 +2510,7 @@ func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.R // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketPolicy for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy func (c *S3) GetBucketPolicy(input *GetBucketPolicyInput) (*GetBucketPolicyOutput, error) { req, out := c.GetBucketPolicyRequest(input) return out, req.Send() @@ -2528,7 +2536,7 @@ const opGetBucketReplication = "GetBucketReplication" // GetBucketReplicationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketReplication operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
@@ -2549,7 +2557,7 @@ const opGetBucketReplication = "GetBucketReplication" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req *request.Request, output *GetBucketReplicationOutput) { op := &request.Operation{ Name: opGetBucketReplication, @@ -2576,7 +2584,7 @@ func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketReplication for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) { req, out := c.GetBucketReplicationRequest(input) return out, req.Send() @@ -2602,7 +2610,7 @@ const opGetBucketRequestPayment = "GetBucketRequestPayment" // GetBucketRequestPaymentRequest generates a "aws/request.Request" representing the // client's request for the GetBucketRequestPayment operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -2623,7 +2631,7 @@ const opGetBucketRequestPayment = "GetBucketRequestPayment" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) (req *request.Request, output *GetBucketRequestPaymentOutput) { op := &request.Operation{ Name: opGetBucketRequestPayment, @@ -2650,7 +2658,7 @@ func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketRequestPayment for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment func (c *S3) GetBucketRequestPayment(input *GetBucketRequestPaymentInput) (*GetBucketRequestPaymentOutput, error) { req, out := c.GetBucketRequestPaymentRequest(input) return out, req.Send() @@ -2676,7 +2684,7 @@ const opGetBucketTagging = "GetBucketTagging" // GetBucketTaggingRequest generates a "aws/request.Request" representing the // client's request for the GetBucketTagging operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
@@ -2697,7 +2705,7 @@ const opGetBucketTagging = "GetBucketTagging" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request.Request, output *GetBucketTaggingOutput) { op := &request.Operation{ Name: opGetBucketTagging, @@ -2724,7 +2732,7 @@ func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketTagging for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging func (c *S3) GetBucketTagging(input *GetBucketTaggingInput) (*GetBucketTaggingOutput, error) { req, out := c.GetBucketTaggingRequest(input) return out, req.Send() @@ -2750,7 +2758,7 @@ const opGetBucketVersioning = "GetBucketVersioning" // GetBucketVersioningRequest generates a "aws/request.Request" representing the // client's request for the GetBucketVersioning operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -2771,7 +2779,7 @@ const opGetBucketVersioning = "GetBucketVersioning" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *request.Request, output *GetBucketVersioningOutput) { op := &request.Operation{ Name: opGetBucketVersioning, @@ -2798,7 +2806,7 @@ func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *r // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketVersioning for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning func (c *S3) GetBucketVersioning(input *GetBucketVersioningInput) (*GetBucketVersioningOutput, error) { req, out := c.GetBucketVersioningRequest(input) return out, req.Send() @@ -2824,7 +2832,7 @@ const opGetBucketWebsite = "GetBucketWebsite" // GetBucketWebsiteRequest generates a "aws/request.Request" representing the // client's request for the GetBucketWebsite operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
@@ -2845,7 +2853,7 @@ const opGetBucketWebsite = "GetBucketWebsite" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request.Request, output *GetBucketWebsiteOutput) { op := &request.Operation{ Name: opGetBucketWebsite, @@ -2872,7 +2880,7 @@ func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketWebsite for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite func (c *S3) GetBucketWebsite(input *GetBucketWebsiteInput) (*GetBucketWebsiteOutput, error) { req, out := c.GetBucketWebsiteRequest(input) return out, req.Send() @@ -2898,7 +2906,7 @@ const opGetObject = "GetObject" // GetObjectRequest generates a "aws/request.Request" representing the // client's request for the GetObject operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -2919,7 +2927,7 @@ const opGetObject = "GetObject" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, output *GetObjectOutput) { op := &request.Operation{ Name: opGetObject, @@ -2951,7 +2959,7 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp // * ErrCodeNoSuchKey "NoSuchKey" // The specified key does not exist. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) { req, out := c.GetObjectRequest(input) return out, req.Send() @@ -2977,7 +2985,7 @@ const opGetObjectAcl = "GetObjectAcl" // GetObjectAclRequest generates a "aws/request.Request" representing the // client's request for the GetObjectAcl operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -2998,7 +3006,7 @@ const opGetObjectAcl = "GetObjectAcl" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request, output *GetObjectAclOutput) { op := &request.Operation{ Name: opGetObjectAcl, @@ -3030,7 +3038,7 @@ func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request // * ErrCodeNoSuchKey "NoSuchKey" // The specified key does not exist. 
// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl func (c *S3) GetObjectAcl(input *GetObjectAclInput) (*GetObjectAclOutput, error) { req, out := c.GetObjectAclRequest(input) return out, req.Send() @@ -3056,7 +3064,7 @@ const opGetObjectTagging = "GetObjectTagging" // GetObjectTaggingRequest generates a "aws/request.Request" representing the // client's request for the GetObjectTagging operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -3077,7 +3085,7 @@ const opGetObjectTagging = "GetObjectTagging" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request.Request, output *GetObjectTaggingOutput) { op := &request.Operation{ Name: opGetObjectTagging, @@ -3104,7 +3112,7 @@ func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetObjectTagging for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging func (c *S3) GetObjectTagging(input *GetObjectTaggingInput) (*GetObjectTaggingOutput, error) { req, out := c.GetObjectTaggingRequest(input) return out, req.Send() @@ -3130,7 +3138,7 @@ const opGetObjectTorrent = "GetObjectTorrent" // GetObjectTorrentRequest generates a "aws/request.Request" representing the // client's request for the GetObjectTorrent operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -3151,7 +3159,7 @@ const opGetObjectTorrent = "GetObjectTorrent" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request.Request, output *GetObjectTorrentOutput) { op := &request.Operation{ Name: opGetObjectTorrent, @@ -3178,7 +3186,7 @@ func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetObjectTorrent for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOutput, error) { req, out := c.GetObjectTorrentRequest(input) return out, req.Send() @@ -3204,7 +3212,7 @@ const opHeadBucket = "HeadBucket" // HeadBucketRequest generates a "aws/request.Request" representing the // client's request for the HeadBucket operation. 
The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -3225,7 +3233,7 @@ const opHeadBucket = "HeadBucket" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, output *HeadBucketOutput) { op := &request.Operation{ Name: opHeadBucket, @@ -3260,7 +3268,7 @@ func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, ou // * ErrCodeNoSuchBucket "NoSuchBucket" // The specified bucket does not exist. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) { req, out := c.HeadBucketRequest(input) return out, req.Send() @@ -3286,7 +3294,7 @@ const opHeadObject = "HeadObject" // HeadObjectRequest generates a "aws/request.Request" representing the // client's request for the HeadObject operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -3307,7 +3315,7 @@ const opHeadObject = "HeadObject" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, output *HeadObjectOutput) { op := &request.Operation{ Name: opHeadObject, @@ -3339,7 +3347,7 @@ func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, ou // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation HeadObject for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) { req, out := c.HeadObjectRequest(input) return out, req.Send() @@ -3365,7 +3373,7 @@ const opListBucketAnalyticsConfigurations = "ListBucketAnalyticsConfigurations" // ListBucketAnalyticsConfigurationsRequest generates a "aws/request.Request" representing the // client's request for the ListBucketAnalyticsConfigurations operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
@@ -3386,7 +3394,7 @@ const opListBucketAnalyticsConfigurations = "ListBucketAnalyticsConfigurations" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalyticsConfigurationsInput) (req *request.Request, output *ListBucketAnalyticsConfigurationsOutput) { op := &request.Operation{ Name: opListBucketAnalyticsConfigurations, @@ -3413,7 +3421,7 @@ func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalytics // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation ListBucketAnalyticsConfigurations for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations func (c *S3) ListBucketAnalyticsConfigurations(input *ListBucketAnalyticsConfigurationsInput) (*ListBucketAnalyticsConfigurationsOutput, error) { req, out := c.ListBucketAnalyticsConfigurationsRequest(input) return out, req.Send() @@ -3439,7 +3447,7 @@ const opListBucketInventoryConfigurations = "ListBucketInventoryConfigurations" // ListBucketInventoryConfigurationsRequest generates a "aws/request.Request" representing the // client's request for the ListBucketInventoryConfigurations operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -3460,7 +3468,7 @@ const opListBucketInventoryConfigurations = "ListBucketInventoryConfigurations" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventoryConfigurationsInput) (req *request.Request, output *ListBucketInventoryConfigurationsOutput) { op := &request.Operation{ Name: opListBucketInventoryConfigurations, @@ -3487,7 +3495,7 @@ func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventory // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation ListBucketInventoryConfigurations for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations func (c *S3) ListBucketInventoryConfigurations(input *ListBucketInventoryConfigurationsInput) (*ListBucketInventoryConfigurationsOutput, error) { req, out := c.ListBucketInventoryConfigurationsRequest(input) return out, req.Send() @@ -3513,7 +3521,7 @@ const opListBucketMetricsConfigurations = "ListBucketMetricsConfigurations" // ListBucketMetricsConfigurationsRequest generates a "aws/request.Request" representing the // client's request for the ListBucketMetricsConfigurations operation. 
The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -3534,7 +3542,7 @@ const opListBucketMetricsConfigurations = "ListBucketMetricsConfigurations" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConfigurationsInput) (req *request.Request, output *ListBucketMetricsConfigurationsOutput) { op := &request.Operation{ Name: opListBucketMetricsConfigurations, @@ -3561,7 +3569,7 @@ func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConf // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation ListBucketMetricsConfigurations for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations func (c *S3) ListBucketMetricsConfigurations(input *ListBucketMetricsConfigurationsInput) (*ListBucketMetricsConfigurationsOutput, error) { req, out := c.ListBucketMetricsConfigurationsRequest(input) return out, req.Send() @@ -3587,7 +3595,7 @@ const opListBuckets = "ListBuckets" // ListBucketsRequest generates a "aws/request.Request" representing the // client's request for the ListBuckets operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -3608,7 +3616,7 @@ const opListBuckets = "ListBuckets" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, output *ListBucketsOutput) { op := &request.Operation{ Name: opListBuckets, @@ -3635,7 +3643,7 @@ func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation ListBuckets for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) { req, out := c.ListBucketsRequest(input) return out, req.Send() @@ -3661,7 +3669,7 @@ const opListMultipartUploads = "ListMultipartUploads" // ListMultipartUploadsRequest generates a "aws/request.Request" representing the // client's request for the ListMultipartUploads operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
@@ -3682,7 +3690,7 @@ const opListMultipartUploads = "ListMultipartUploads" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *request.Request, output *ListMultipartUploadsOutput) { op := &request.Operation{ Name: opListMultipartUploads, @@ -3715,7 +3723,7 @@ func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation ListMultipartUploads for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) { req, out := c.ListMultipartUploadsRequest(input) return out, req.Send() @@ -3791,7 +3799,7 @@ const opListObjectVersions = "ListObjectVersions" // ListObjectVersionsRequest generates a "aws/request.Request" representing the // client's request for the ListObjectVersions operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -3812,7 +3820,7 @@ const opListObjectVersions = "ListObjectVersions" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *request.Request, output *ListObjectVersionsOutput) { op := &request.Operation{ Name: opListObjectVersions, @@ -3845,7 +3853,7 @@ func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *req // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation ListObjectVersions for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) { req, out := c.ListObjectVersionsRequest(input) return out, req.Send() @@ -3921,7 +3929,7 @@ const opListObjects = "ListObjects" // ListObjectsRequest generates a "aws/request.Request" representing the // client's request for the ListObjects operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
@@ -3942,7 +3950,7 @@ const opListObjects = "ListObjects" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, output *ListObjectsOutput) { op := &request.Operation{ Name: opListObjects, @@ -3982,7 +3990,7 @@ func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, // * ErrCodeNoSuchBucket "NoSuchBucket" // The specified bucket does not exist. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) { req, out := c.ListObjectsRequest(input) return out, req.Send() @@ -4058,7 +4066,7 @@ const opListObjectsV2 = "ListObjectsV2" // ListObjectsV2Request generates a "aws/request.Request" representing the // client's request for the ListObjectsV2 operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -4079,7 +4087,7 @@ const opListObjectsV2 = "ListObjectsV2" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Request, output *ListObjectsV2Output) { op := &request.Operation{ Name: opListObjectsV2, @@ -4120,7 +4128,7 @@ func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Reque // * ErrCodeNoSuchBucket "NoSuchBucket" // The specified bucket does not exist. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 func (c *S3) ListObjectsV2(input *ListObjectsV2Input) (*ListObjectsV2Output, error) { req, out := c.ListObjectsV2Request(input) return out, req.Send() @@ -4196,7 +4204,7 @@ const opListParts = "ListParts" // ListPartsRequest generates a "aws/request.Request" representing the // client's request for the ListParts operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -4217,7 +4225,7 @@ const opListParts = "ListParts" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, output *ListPartsOutput) { op := &request.Operation{ Name: opListParts, @@ -4250,7 +4258,7 @@ func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, outp // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation ListParts for usage and error information. 
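[Editor's note: the hunk ranges above jump over the generated `Pages` helpers that accompany list operations such as ListObjects, so those helpers are not shown in this excerpt. Assuming the usual generated signature, a minimal pagination sketch looks like the following; the region and bucket name are placeholders.]

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession(
		aws.NewConfig().WithRegion("us-east-1")))) // placeholder region

	// Walk every page of results; returning false from the callback stops
	// the iteration early.
	err := svc.ListObjectsPages(&s3.ListObjectsInput{
		Bucket: aws.String("example-bucket"), // placeholder bucket
	}, func(page *s3.ListObjectsOutput, lastPage bool) bool {
		for _, obj := range page.Contents {
			fmt.Println(aws.StringValue(obj.Key))
		}
		return true // continue to the next page
	})
	if err != nil {
		fmt.Println("list failed:", err)
	}
}
```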
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) { req, out := c.ListPartsRequest(input) return out, req.Send() @@ -4326,7 +4334,7 @@ const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration" // PutBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the // client's request for the PutBucketAccelerateConfiguration operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -4347,7 +4355,7 @@ const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateConfigurationInput) (req *request.Request, output *PutBucketAccelerateConfigurationOutput) { op := &request.Operation{ Name: opPutBucketAccelerateConfiguration, @@ -4376,7 +4384,7 @@ func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateC // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketAccelerateConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration func (c *S3) PutBucketAccelerateConfiguration(input *PutBucketAccelerateConfigurationInput) (*PutBucketAccelerateConfigurationOutput, error) { req, out := c.PutBucketAccelerateConfigurationRequest(input) return out, req.Send() @@ -4402,7 +4410,7 @@ const opPutBucketAcl = "PutBucketAcl" // PutBucketAclRequest generates a "aws/request.Request" representing the // client's request for the PutBucketAcl operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -4423,7 +4431,7 @@ const opPutBucketAcl = "PutBucketAcl" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request, output *PutBucketAclOutput) { op := &request.Operation{ Name: opPutBucketAcl, @@ -4452,7 +4460,7 @@ func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketAcl for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl func (c *S3) PutBucketAcl(input *PutBucketAclInput) (*PutBucketAclOutput, error) { req, out := c.PutBucketAclRequest(input) return out, req.Send() @@ -4478,7 +4486,7 @@ const opPutBucketAnalyticsConfiguration = "PutBucketAnalyticsConfiguration" // PutBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the // client's request for the PutBucketAnalyticsConfiguration operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -4499,7 +4507,7 @@ const opPutBucketAnalyticsConfiguration = "PutBucketAnalyticsConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsConfigurationInput) (req *request.Request, output *PutBucketAnalyticsConfigurationOutput) { op := &request.Operation{ Name: opPutBucketAnalyticsConfiguration, @@ -4529,7 +4537,7 @@ func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsCon // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketAnalyticsConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration func (c *S3) PutBucketAnalyticsConfiguration(input *PutBucketAnalyticsConfigurationInput) (*PutBucketAnalyticsConfigurationOutput, error) { req, out := c.PutBucketAnalyticsConfigurationRequest(input) return out, req.Send() @@ -4555,7 +4563,7 @@ const opPutBucketCors = "PutBucketCors" // PutBucketCorsRequest generates a "aws/request.Request" representing the // client's request for the PutBucketCors operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -4576,7 +4584,7 @@ const opPutBucketCors = "PutBucketCors" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Request, output *PutBucketCorsOutput) { op := &request.Operation{ Name: opPutBucketCors, @@ -4605,7 +4613,7 @@ func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Reque // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketCors for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors func (c *S3) PutBucketCors(input *PutBucketCorsInput) (*PutBucketCorsOutput, error) { req, out := c.PutBucketCorsRequest(input) return out, req.Send() @@ -4631,7 +4639,7 @@ const opPutBucketEncryption = "PutBucketEncryption" // PutBucketEncryptionRequest generates a "aws/request.Request" representing the // client's request for the PutBucketEncryption operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -4652,7 +4660,7 @@ const opPutBucketEncryption = "PutBucketEncryption" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryption +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryption func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *request.Request, output *PutBucketEncryptionOutput) { op := &request.Operation{ Name: opPutBucketEncryption, @@ -4682,7 +4690,7 @@ func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *r // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketEncryption for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryption +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryption func (c *S3) PutBucketEncryption(input *PutBucketEncryptionInput) (*PutBucketEncryptionOutput, error) { req, out := c.PutBucketEncryptionRequest(input) return out, req.Send() @@ -4708,7 +4716,7 @@ const opPutBucketInventoryConfiguration = "PutBucketInventoryConfiguration" // PutBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the // client's request for the PutBucketInventoryConfiguration operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -4729,7 +4737,7 @@ const opPutBucketInventoryConfiguration = "PutBucketInventoryConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryConfigurationInput) (req *request.Request, output *PutBucketInventoryConfigurationOutput) { op := &request.Operation{ Name: opPutBucketInventoryConfiguration, @@ -4759,7 +4767,7 @@ func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryCon // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketInventoryConfiguration for usage and error information. 
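PutBucketEncryption, documented above, installs a bucket-wide default encryption rule. A sketch assuming the configuration types generated for this API elsewhere in the file (`ServerSideEncryptionConfiguration` and friends), with the same hypothetical bucket:

```go
_, err := svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{
	Bucket: aws.String("my-bucket"), // hypothetical
	ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
		Rules: []*s3.ServerSideEncryptionRule{{
			// Encrypt new objects with SSE-S3 (AES-256) unless a request
			// supplies its own encryption headers.
			ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
				SSEAlgorithm: aws.String(s3.ServerSideEncryptionAes256),
			},
		}},
	},
})
if err != nil {
	log.Fatal(err)
}
```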
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration func (c *S3) PutBucketInventoryConfiguration(input *PutBucketInventoryConfigurationInput) (*PutBucketInventoryConfigurationOutput, error) { req, out := c.PutBucketInventoryConfigurationRequest(input) return out, req.Send() @@ -4785,7 +4793,7 @@ const opPutBucketLifecycle = "PutBucketLifecycle" // PutBucketLifecycleRequest generates a "aws/request.Request" representing the // client's request for the PutBucketLifecycle operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -4806,7 +4814,7 @@ const opPutBucketLifecycle = "PutBucketLifecycle" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *request.Request, output *PutBucketLifecycleOutput) { if c.Client.Config.Logger != nil { c.Client.Config.Logger.Log("This operation, PutBucketLifecycle, has been deprecated") @@ -4838,7 +4846,7 @@ func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *req // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketLifecycle for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) { req, out := c.PutBucketLifecycleRequest(input) return out, req.Send() @@ -4864,7 +4872,7 @@ const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration" // PutBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the // client's request for the PutBucketLifecycleConfiguration operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -4885,7 +4893,7 @@ const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleConfigurationInput) (req *request.Request, output *PutBucketLifecycleConfigurationOutput) { op := &request.Operation{ Name: opPutBucketLifecycleConfiguration, @@ -4915,7 +4923,7 @@ func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleCon // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketLifecycleConfiguration for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) { req, out := c.PutBucketLifecycleConfigurationRequest(input) return out, req.Send() @@ -4941,7 +4949,7 @@ const opPutBucketLogging = "PutBucketLogging" // PutBucketLoggingRequest generates a "aws/request.Request" representing the // client's request for the PutBucketLogging operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -4962,7 +4970,7 @@ const opPutBucketLogging = "PutBucketLogging" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request.Request, output *PutBucketLoggingOutput) { op := &request.Operation{ Name: opPutBucketLogging, @@ -4993,7 +5001,7 @@ func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketLogging for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) { req, out := c.PutBucketLoggingRequest(input) return out, req.Send() @@ -5019,7 +5027,7 @@ const opPutBucketMetricsConfiguration = "PutBucketMetricsConfiguration" // PutBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the // client's request for the PutBucketMetricsConfiguration operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -5040,7 +5048,7 @@ const opPutBucketMetricsConfiguration = "PutBucketMetricsConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigurationInput) (req *request.Request, output *PutBucketMetricsConfigurationOutput) { op := &request.Operation{ Name: opPutBucketMetricsConfiguration, @@ -5070,7 +5078,7 @@ func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigu // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketMetricsConfiguration for usage and error information. 
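Note the deprecation log emitted by PutBucketLifecycleRequest above; PutBucketLifecycleConfiguration is its replacement. A sketch of a single expiration rule, with hypothetical names and the setup from the first sketch:

```go
_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
	Bucket: aws.String("my-bucket"),
	LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
		Rules: []*s3.LifecycleRule{{
			ID:         aws.String("expire-tmp"),
			Status:     aws.String("Enabled"),
			Filter:     &s3.LifecycleRuleFilter{Prefix: aws.String("tmp/")},
			Expiration: &s3.LifecycleExpiration{Days: aws.Int64(30)},
		}},
	},
})
if err != nil {
	log.Fatal(err)
}
```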
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration func (c *S3) PutBucketMetricsConfiguration(input *PutBucketMetricsConfigurationInput) (*PutBucketMetricsConfigurationOutput, error) { req, out := c.PutBucketMetricsConfigurationRequest(input) return out, req.Send() @@ -5096,7 +5104,7 @@ const opPutBucketNotification = "PutBucketNotification" // PutBucketNotificationRequest generates a "aws/request.Request" representing the // client's request for the PutBucketNotification operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -5117,7 +5125,7 @@ const opPutBucketNotification = "PutBucketNotification" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (req *request.Request, output *PutBucketNotificationOutput) { if c.Client.Config.Logger != nil { c.Client.Config.Logger.Log("This operation, PutBucketNotification, has been deprecated") @@ -5149,7 +5157,7 @@ func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (re // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketNotification for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) { req, out := c.PutBucketNotificationRequest(input) return out, req.Send() @@ -5175,7 +5183,7 @@ const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration // PutBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the // client's request for the PutBucketNotificationConfiguration operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -5196,7 +5204,7 @@ const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificationConfigurationInput) (req *request.Request, output *PutBucketNotificationConfigurationOutput) { op := &request.Operation{ Name: opPutBucketNotificationConfiguration, @@ -5225,7 +5233,7 @@ func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificat // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketNotificationConfiguration for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration func (c *S3) PutBucketNotificationConfiguration(input *PutBucketNotificationConfigurationInput) (*PutBucketNotificationConfigurationOutput, error) { req, out := c.PutBucketNotificationConfigurationRequest(input) return out, req.Send() @@ -5251,7 +5259,7 @@ const opPutBucketPolicy = "PutBucketPolicy" // PutBucketPolicyRequest generates a "aws/request.Request" representing the // client's request for the PutBucketPolicy operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -5272,7 +5280,7 @@ const opPutBucketPolicy = "PutBucketPolicy" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.Request, output *PutBucketPolicyOutput) { op := &request.Operation{ Name: opPutBucketPolicy, @@ -5302,7 +5310,7 @@ func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.R // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketPolicy for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy func (c *S3) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) { req, out := c.PutBucketPolicyRequest(input) return out, req.Send() @@ -5328,7 +5336,7 @@ const opPutBucketReplication = "PutBucketReplication" // PutBucketReplicationRequest generates a "aws/request.Request" representing the // client's request for the PutBucketReplication operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -5349,7 +5357,7 @@ const opPutBucketReplication = "PutBucketReplication" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req *request.Request, output *PutBucketReplicationOutput) { op := &request.Operation{ Name: opPutBucketReplication, @@ -5379,7 +5387,7 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketReplication for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) { req, out := c.PutBucketReplicationRequest(input) return out, req.Send() @@ -5405,7 +5413,7 @@ const opPutBucketRequestPayment = "PutBucketRequestPayment" // PutBucketRequestPaymentRequest generates a "aws/request.Request" representing the // client's request for the PutBucketRequestPayment operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -5426,7 +5434,7 @@ const opPutBucketRequestPayment = "PutBucketRequestPayment" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) (req *request.Request, output *PutBucketRequestPaymentOutput) { op := &request.Operation{ Name: opPutBucketRequestPayment, @@ -5459,7 +5467,7 @@ func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketRequestPayment for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment func (c *S3) PutBucketRequestPayment(input *PutBucketRequestPaymentInput) (*PutBucketRequestPaymentOutput, error) { req, out := c.PutBucketRequestPaymentRequest(input) return out, req.Send() @@ -5485,7 +5493,7 @@ const opPutBucketTagging = "PutBucketTagging" // PutBucketTaggingRequest generates a "aws/request.Request" representing the // client's request for the PutBucketTagging operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -5506,7 +5514,7 @@ const opPutBucketTagging = "PutBucketTagging" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request.Request, output *PutBucketTaggingOutput) { op := &request.Operation{ Name: opPutBucketTagging, @@ -5535,7 +5543,7 @@ func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketTagging for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging func (c *S3) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOutput, error) { req, out := c.PutBucketTaggingRequest(input) return out, req.Send() @@ -5561,7 +5569,7 @@ const opPutBucketVersioning = "PutBucketVersioning" // PutBucketVersioningRequest generates a "aws/request.Request" representing the // client's request for the PutBucketVersioning operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -5582,7 +5590,7 @@ const opPutBucketVersioning = "PutBucketVersioning" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *request.Request, output *PutBucketVersioningOutput) { op := &request.Operation{ Name: opPutBucketVersioning, @@ -5612,7 +5620,7 @@ func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *r // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketVersioning for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVersioningOutput, error) { req, out := c.PutBucketVersioningRequest(input) return out, req.Send() @@ -5638,7 +5646,7 @@ const opPutBucketWebsite = "PutBucketWebsite" // PutBucketWebsiteRequest generates a "aws/request.Request" representing the // client's request for the PutBucketWebsite operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -5659,7 +5667,7 @@ const opPutBucketWebsite = "PutBucketWebsite" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request.Request, output *PutBucketWebsiteOutput) { op := &request.Operation{ Name: opPutBucketWebsite, @@ -5688,7 +5696,7 @@ func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketWebsite for usage and error information. 
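The bucket-configuration setters in this stretch all take a small configuration struct. PutBucketVersioning is representative; a sketch with the same hypothetical bucket:

```go
_, err := svc.PutBucketVersioning(&s3.PutBucketVersioningInput{
	Bucket: aws.String("my-bucket"),
	VersioningConfiguration: &s3.VersioningConfiguration{
		Status: aws.String(s3.BucketVersioningStatusEnabled), // "Enabled"
	},
})
if err != nil {
	log.Fatal(err)
}
```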
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOutput, error) { req, out := c.PutBucketWebsiteRequest(input) return out, req.Send() @@ -5714,7 +5722,7 @@ const opPutObject = "PutObject" // PutObjectRequest generates a "aws/request.Request" representing the // client's request for the PutObject operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -5735,7 +5743,7 @@ const opPutObject = "PutObject" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, output *PutObjectOutput) { op := &request.Operation{ Name: opPutObject, @@ -5762,7 +5770,7 @@ func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, outp // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutObject for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) { req, out := c.PutObjectRequest(input) return out, req.Send() @@ -5788,7 +5796,7 @@ const opPutObjectAcl = "PutObjectAcl" // PutObjectAclRequest generates a "aws/request.Request" representing the // client's request for the PutObjectAcl operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -5809,7 +5817,7 @@ const opPutObjectAcl = "PutObjectAcl" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request, output *PutObjectAclOutput) { op := &request.Operation{ Name: opPutObjectAcl, @@ -5842,7 +5850,7 @@ func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request // * ErrCodeNoSuchKey "NoSuchKey" // The specified key does not exist. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl func (c *S3) PutObjectAcl(input *PutObjectAclInput) (*PutObjectAclOutput, error) { req, out := c.PutObjectAclRequest(input) return out, req.Send() @@ -5868,7 +5876,7 @@ const opPutObjectTagging = "PutObjectTagging" // PutObjectTaggingRequest generates a "aws/request.Request" representing the // client's request for the PutObjectTagging operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. 
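PutObject, documented above, takes the object body as an `io.ReadSeeker` so the SDK can sign the payload and rewind it on retry. A minimal sketch with hypothetical names, reusing the imports from the first sketch plus `bytes`:

```go
_, err := svc.PutObject(&s3.PutObjectInput{
	Bucket:      aws.String("my-bucket"),
	Key:         aws.String("notes/hello.txt"),
	Body:        bytes.NewReader([]byte("hello world")), // any io.ReadSeeker
	ContentType: aws.String("text/plain"),
})
if err != nil {
	log.Fatal(err)
}
```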
// // Use "Send" method on the returned Request to send the API call to the service. @@ -5889,7 +5897,7 @@ const opPutObjectTagging = "PutObjectTagging" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request.Request, output *PutObjectTaggingOutput) { op := &request.Operation{ Name: opPutObjectTagging, @@ -5916,7 +5924,7 @@ func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutObjectTagging for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging func (c *S3) PutObjectTagging(input *PutObjectTaggingInput) (*PutObjectTaggingOutput, error) { req, out := c.PutObjectTaggingRequest(input) return out, req.Send() @@ -5942,7 +5950,7 @@ const opRestoreObject = "RestoreObject" // RestoreObjectRequest generates a "aws/request.Request" representing the // client's request for the RestoreObject operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -5963,7 +5971,7 @@ const opRestoreObject = "RestoreObject" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) { op := &request.Operation{ Name: opRestoreObject, @@ -5995,7 +6003,7 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // * ErrCodeObjectAlreadyInActiveTierError "ObjectAlreadyInActiveTierError" // This operation is not allowed against this storage tier // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) { req, out := c.RestoreObjectRequest(input) return out, req.Send() @@ -6017,11 +6025,93 @@ func (c *S3) RestoreObjectWithContext(ctx aws.Context, input *RestoreObjectInput return out, req.Send() } +const opSelectObjectContent = "SelectObjectContent" + +// SelectObjectContentRequest generates a "aws/request.Request" representing the +// client's request for the SelectObjectContent operation. The "output" return +// value will be populated with the request's response once the request completes +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SelectObjectContent for more information on using the SelectObjectContent +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the SelectObjectContentRequest method. +// req, resp := client.SelectObjectContentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContent +func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *request.Request, output *SelectObjectContentOutput) { + op := &request.Operation{ + Name: opSelectObjectContent, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?select&select-type=2", + } + + if input == nil { + input = &SelectObjectContentInput{} + } + + output = &SelectObjectContentOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Send.Swap(client.LogHTTPResponseHandler.Name, client.LogHTTPResponseHeaderHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, rest.UnmarshalHandler) + req.Handlers.Unmarshal.PushBack(output.runEventStreamLoop) + return +} + +// SelectObjectContent API operation for Amazon Simple Storage Service. +// +// This operation filters the contents of an Amazon S3 object based on a simple +// Structured Query Language (SQL) statement. In the request, along with the +// SQL expression, you must also specify a data serialization format (JSON or +// CSV) of the object. Amazon S3 uses this to parse object data into records, +// and returns only records that match the specified SQL expression. You must +// also specify the data serialization format for the response. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation SelectObjectContent for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContent +func (c *S3) SelectObjectContent(input *SelectObjectContentInput) (*SelectObjectContentOutput, error) { + req, out := c.SelectObjectContentRequest(input) + return out, req.Send() +} + +// SelectObjectContentWithContext is the same as SelectObjectContent with the addition of +// the ability to pass a context and additional request options. +// +// See SelectObjectContent for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) SelectObjectContentWithContext(ctx aws.Context, input *SelectObjectContentInput, opts ...request.Option) (*SelectObjectContentOutput, error) { + req, out := c.SelectObjectContentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUploadPart = "UploadPart" // UploadPartRequest generates a "aws/request.Request" representing the // client's request for the UploadPart operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
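SelectObjectContent, added above, is the first event-streaming operation in this file; note the response-header logging and event-stream unmarshal handlers its Request method swaps in. A sketch of how a caller might drive it, assuming the event-stream accessors (`EventStream`, `Events()`, `*s3.RecordsEvent`) that accompany this change, plus hypothetical bucket, key, and query:

```go
resp, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
	Bucket:         aws.String("my-bucket"),
	Key:            aws.String("people.csv"),
	Expression:     aws.String("SELECT s.name FROM S3Object s WHERE s.age > 25"),
	ExpressionType: aws.String("SQL"),
	InputSerialization: &s3.InputSerialization{
		CSV: &s3.CSVInput{FileHeaderInfo: aws.String(s3.FileHeaderInfoUse)},
	},
	OutputSerialization: &s3.OutputSerialization{
		CSV: &s3.CSVOutput{},
	},
})
if err != nil {
	log.Fatal(err)
}
defer resp.EventStream.Close()

// Records arrive incrementally; ContinuationEvent keep-alives and the
// final end-of-stream event are delivered on the same channel.
for event := range resp.EventStream.Events() {
	if records, ok := event.(*s3.RecordsEvent); ok {
		fmt.Print(string(records.Payload))
	}
}
```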
@@ -6042,7 +6132,7 @@ const opUploadPart = "UploadPart" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) { op := &request.Operation{ Name: opUploadPart, @@ -6075,7 +6165,7 @@ func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, ou // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation UploadPart for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) { req, out := c.UploadPartRequest(input) return out, req.Send() @@ -6101,7 +6191,7 @@ const opUploadPartCopy = "UploadPartCopy" // UploadPartCopyRequest generates a "aws/request.Request" representing the // client's request for the UploadPartCopy operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -6122,7 +6212,7 @@ const opUploadPartCopy = "UploadPartCopy" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Request, output *UploadPartCopyOutput) { op := &request.Operation{ Name: opUploadPartCopy, @@ -6149,7 +6239,7 @@ func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Req // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation UploadPartCopy for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) { req, out := c.UploadPartCopyRequest(input) return out, req.Send() @@ -6173,7 +6263,6 @@ func (c *S3) UploadPartCopyWithContext(ctx aws.Context, input *UploadPartCopyInp // Specifies the days since the initiation of an Incomplete Multipart Upload // that Lifecycle will wait before permanently removing all parts of the upload. 
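UploadPart is the middle step of the multipart flow, and as the AbortIncompleteMultipartUpload comment above notes, an initiated upload keeps costing storage until it is completed or aborted. A compressed sketch of the happy path; the names and payload are hypothetical:

```go
up, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
	Bucket: aws.String("my-bucket"),
	Key:    aws.String("big.bin"),
})
// ... handle err ...

partData := make([]byte, 5<<20) // hypothetical payload; every part but the last must be >= 5 MB
part, err := svc.UploadPart(&s3.UploadPartInput{
	Bucket:     aws.String("my-bucket"),
	Key:        aws.String("big.bin"),
	UploadId:   up.UploadId,
	PartNumber: aws.Int64(1), // parts are numbered from 1
	Body:       bytes.NewReader(partData),
})
// ... handle err ...

_, err = svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
	Bucket:   aws.String("my-bucket"),
	Key:      aws.String("big.bin"),
	UploadId: up.UploadId,
	MultipartUpload: &s3.CompletedMultipartUpload{
		Parts: []*s3.CompletedPart{{ETag: part.ETag, PartNumber: aws.Int64(1)}},
	},
})
```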
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortIncompleteMultipartUpload type AbortIncompleteMultipartUpload struct { _ struct{} `type:"structure"` @@ -6198,7 +6287,6 @@ func (s *AbortIncompleteMultipartUpload) SetDaysAfterInitiation(v int64) *AbortI return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUploadRequest type AbortMultipartUploadInput struct { _ struct{} `type:"structure"` @@ -6281,7 +6369,6 @@ func (s *AbortMultipartUploadInput) SetUploadId(v string) *AbortMultipartUploadI return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUploadOutput type AbortMultipartUploadOutput struct { _ struct{} `type:"structure"` @@ -6306,7 +6393,6 @@ func (s *AbortMultipartUploadOutput) SetRequestCharged(v string) *AbortMultipart return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AccelerateConfiguration type AccelerateConfiguration struct { _ struct{} `type:"structure"` @@ -6330,7 +6416,6 @@ func (s *AccelerateConfiguration) SetStatus(v string) *AccelerateConfiguration { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AccessControlPolicy type AccessControlPolicy struct { _ struct{} `type:"structure"` @@ -6383,7 +6468,6 @@ func (s *AccessControlPolicy) SetOwner(v *Owner) *AccessControlPolicy { } // Container for information regarding the access control for replicas. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AccessControlTranslation type AccessControlTranslation struct { _ struct{} `type:"structure"` @@ -6422,7 +6506,6 @@ func (s *AccessControlTranslation) SetOwner(v string) *AccessControlTranslation return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsAndOperator type AnalyticsAndOperator struct { _ struct{} `type:"structure"` @@ -6475,7 +6558,6 @@ func (s *AnalyticsAndOperator) SetTags(v []*Tag) *AnalyticsAndOperator { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsConfiguration type AnalyticsConfiguration struct { _ struct{} `type:"structure"` @@ -6550,7 +6632,6 @@ func (s *AnalyticsConfiguration) SetStorageClassAnalysis(v *StorageClassAnalysis return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsExportDestination type AnalyticsExportDestination struct { _ struct{} `type:"structure"` @@ -6594,7 +6675,6 @@ func (s *AnalyticsExportDestination) SetS3BucketDestination(v *AnalyticsS3Bucket return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsFilter type AnalyticsFilter struct { _ struct{} `type:"structure"` @@ -6657,7 +6737,6 @@ func (s *AnalyticsFilter) SetTag(v *Tag) *AnalyticsFilter { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsS3BucketDestination type AnalyticsS3BucketDestination struct { _ struct{} `type:"structure"` @@ -6737,12 +6816,11 @@ func (s *AnalyticsS3BucketDestination) SetPrefix(v string) *AnalyticsS3BucketDes return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Bucket type Bucket struct { _ struct{} `type:"structure"` // Date the bucket was created. - CreationDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + CreationDate *time.Time `type:"timestamp"` // The name of the bucket. 
Name *string `type:"string"` @@ -6770,7 +6848,6 @@ func (s *Bucket) SetName(v string) *Bucket { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/BucketLifecycleConfiguration type BucketLifecycleConfiguration struct { _ struct{} `type:"structure"` @@ -6817,10 +6894,12 @@ func (s *BucketLifecycleConfiguration) SetRules(v []*LifecycleRule) *BucketLifec return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/BucketLoggingStatus type BucketLoggingStatus struct { _ struct{} `type:"structure"` + // Container for logging information. Presence of this element indicates that + // logging is enabled. Parameters TargetBucket and TargetPrefix are required + // in this case. LoggingEnabled *LoggingEnabled `type:"structure"` } @@ -6855,7 +6934,6 @@ func (s *BucketLoggingStatus) SetLoggingEnabled(v *LoggingEnabled) *BucketLoggin return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CORSConfiguration type CORSConfiguration struct { _ struct{} `type:"structure"` @@ -6902,7 +6980,6 @@ func (s *CORSConfiguration) SetCORSRules(v []*CORSRule) *CORSConfiguration { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CORSRule type CORSRule struct { _ struct{} `type:"structure"` @@ -6987,10 +7064,14 @@ func (s *CORSRule) SetMaxAgeSeconds(v int64) *CORSRule { } // Describes how a CSV-formatted input object is formatted. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CSVInput type CSVInput struct { _ struct{} `type:"structure"` + // Specifies that CSV field values may contain quoted record delimiters and + // such records should be allowed. Default value is FALSE. Setting this value + // to TRUE may lower performance. + AllowQuotedRecordDelimiter *bool `type:"boolean"` + // Single character used to indicate a row should be ignored when present at // the start of a row. Comments *string `type:"string"` @@ -7022,6 +7103,12 @@ func (s CSVInput) GoString() string { return s.String() } +// SetAllowQuotedRecordDelimiter sets the AllowQuotedRecordDelimiter field's value. +func (s *CSVInput) SetAllowQuotedRecordDelimiter(v bool) *CSVInput { + s.AllowQuotedRecordDelimiter = &v + return s +} + // SetComments sets the Comments field's value. func (s *CSVInput) SetComments(v string) *CSVInput { s.Comments = &v @@ -7059,7 +7146,6 @@ func (s *CSVInput) SetRecordDelimiter(v string) *CSVInput { } // Describes how CSV-formatted results are formatted. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CSVOutput
 type CSVOutput struct {
 	_ struct{} `type:"structure"`
 
@@ -7120,7 +7206,6 @@ func (s *CSVOutput) SetRecordDelimiter(v string) *CSVOutput {
 	return s
 }
 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CloudFunctionConfiguration
 type CloudFunctionConfiguration struct {
 	_ struct{} `type:"structure"`
 
@@ -7178,7 +7263,6 @@ func (s *CloudFunctionConfiguration) SetInvocationRole(v string) *CloudFunctionC
 	return s
 }
 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CommonPrefix
 type CommonPrefix struct {
 	_ struct{} `type:"structure"`
 
@@ -7201,7 +7285,6 @@ func (s *CommonPrefix) SetPrefix(v string) *CommonPrefix {
 	return s
 }
 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUploadRequest
 type CompleteMultipartUploadInput struct {
 	_ struct{} `type:"structure" payload:"MultipartUpload"`
 
@@ -7292,7 +7375,6 @@ func (s *CompleteMultipartUploadInput) SetUploadId(v string) *CompleteMultipartU
 	return s
 }
 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUploadOutput
 type CompleteMultipartUploadOutput struct {
 	_ struct{} `type:"structure"`
 
@@ -7396,7 +7478,6 @@ func (s *CompleteMultipartUploadOutput) SetVersionId(v string) *CompleteMultipar
 	return s
 }
 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompletedMultipartUpload
 type CompletedMultipartUpload struct {
 	_ struct{} `type:"structure"`
 
@@ -7419,7 +7500,6 @@ func (s *CompletedMultipartUpload) SetParts(v []*CompletedPart) *CompletedMultip
 	return s
 }
 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompletedPart
 type CompletedPart struct {
 	_ struct{} `type:"structure"`
 
@@ -7453,7 +7533,6 @@ func (s *CompletedPart) SetPartNumber(v int64) *CompletedPart {
 	return s
 }
 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Condition
 type Condition struct {
 	_ struct{} `type:"structure"`
 
@@ -7496,7 +7575,32 @@ func (s *Condition) SetKeyPrefixEquals(v string) *Condition {
 	return s
 }
 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectRequest
+type ContinuationEvent struct {
+	_ struct{} `locationName:"ContinuationEvent" type:"structure"`
+}
+
+// String returns the string representation
+func (s ContinuationEvent) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ContinuationEvent) GoString() string {
+	return s.String()
+}
+
+// The ContinuationEvent is an event in the SelectObjectContentEventStream group of events.
+func (s *ContinuationEvent) eventSelectObjectContentEventStream() {}
+
+// UnmarshalEvent unmarshals the EventStream Message into the ContinuationEvent value.
+// This method is only used internally within the SDK's EventStream handling.
+func (s *ContinuationEvent) UnmarshalEvent(
+	payloadUnmarshaler protocol.PayloadUnmarshaler,
+	msg eventstream.Message,
+) error {
+	return nil
+}
+
 type CopyObjectInput struct {
 	_ struct{} `type:"structure"`
 
@@ -7533,14 +7637,14 @@ type CopyObjectInput struct {
 	CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"`
 
 	// Copies the object if it has been modified since the specified time.
- CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"` + CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp"` // Copies the object if its entity tag (ETag) is different than the specified // ETag. CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` // Copies the object if it hasn't been modified since the specified time. - CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"` + CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"` // Specifies the algorithm to use when decrypting the source object (e.g., AES256). CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` @@ -7556,7 +7660,7 @@ type CopyObjectInput struct { CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` // The date and time at which the object is no longer cacheable. - Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` @@ -7880,7 +7984,6 @@ func (s *CopyObjectInput) SetWebsiteRedirectLocation(v string) *CopyObjectInput return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectOutput type CopyObjectOutput struct { _ struct{} `type:"structure" payload:"CopyObjectResult"` @@ -7981,13 +8084,12 @@ func (s *CopyObjectOutput) SetVersionId(v string) *CopyObjectOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectResult type CopyObjectResult struct { _ struct{} `type:"structure"` ETag *string `type:"string"` - LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + LastModified *time.Time `type:"timestamp"` } // String returns the string representation @@ -8012,7 +8114,6 @@ func (s *CopyObjectResult) SetLastModified(v time.Time) *CopyObjectResult { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyPartResult type CopyPartResult struct { _ struct{} `type:"structure"` @@ -8020,7 +8121,7 @@ type CopyPartResult struct { ETag *string `type:"string"` // Date and time at which the object was uploaded. 
- LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + LastModified *time.Time `type:"timestamp"` } // String returns the string representation @@ -8045,7 +8146,6 @@ func (s *CopyPartResult) SetLastModified(v time.Time) *CopyPartResult { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketConfiguration type CreateBucketConfiguration struct { _ struct{} `type:"structure"` @@ -8070,7 +8170,6 @@ func (s *CreateBucketConfiguration) SetLocationConstraint(v string) *CreateBucke return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketRequest type CreateBucketInput struct { _ struct{} `type:"structure" payload:"CreateBucketConfiguration"` @@ -8177,7 +8276,6 @@ func (s *CreateBucketInput) SetGrantWriteACP(v string) *CreateBucketInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketOutput type CreateBucketOutput struct { _ struct{} `type:"structure"` @@ -8200,7 +8298,6 @@ func (s *CreateBucketOutput) SetLocation(v string) *CreateBucketOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUploadRequest type CreateMultipartUploadInput struct { _ struct{} `type:"structure"` @@ -8228,7 +8325,7 @@ type CreateMultipartUploadInput struct { ContentType *string `location:"header" locationName:"Content-Type" type:"string"` // The date and time at which the object is no longer cacheable. - Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` @@ -8472,12 +8569,11 @@ func (s *CreateMultipartUploadInput) SetWebsiteRedirectLocation(v string) *Creat return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUploadOutput type CreateMultipartUploadOutput struct { _ struct{} `type:"structure"` // Date when multipart upload will become eligible for abort operation by lifecycle. - AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp" timestampFormat:"rfc822"` + AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"` // Id of the lifecycle rule that makes a multipart upload eligible for abort // operation. 
@@ -8592,7 +8688,6 @@ func (s *CreateMultipartUploadOutput) SetUploadId(v string) *CreateMultipartUplo return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Delete type Delete struct { _ struct{} `type:"structure"` @@ -8649,7 +8744,6 @@ func (s *Delete) SetQuiet(v bool) *Delete { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfigurationRequest type DeleteBucketAnalyticsConfigurationInput struct { _ struct{} `type:"structure"` @@ -8709,7 +8803,6 @@ func (s *DeleteBucketAnalyticsConfigurationInput) SetId(v string) *DeleteBucketA return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfigurationOutput type DeleteBucketAnalyticsConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -8724,7 +8817,6 @@ func (s DeleteBucketAnalyticsConfigurationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCorsRequest type DeleteBucketCorsInput struct { _ struct{} `type:"structure"` @@ -8768,7 +8860,6 @@ func (s *DeleteBucketCorsInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCorsOutput type DeleteBucketCorsOutput struct { _ struct{} `type:"structure"` } @@ -8783,7 +8874,6 @@ func (s DeleteBucketCorsOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryptionRequest type DeleteBucketEncryptionInput struct { _ struct{} `type:"structure"` @@ -8830,7 +8920,6 @@ func (s *DeleteBucketEncryptionInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryptionOutput type DeleteBucketEncryptionOutput struct { _ struct{} `type:"structure"` } @@ -8845,7 +8934,6 @@ func (s DeleteBucketEncryptionOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketRequest type DeleteBucketInput struct { _ struct{} `type:"structure"` @@ -8889,7 +8977,6 @@ func (s *DeleteBucketInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfigurationRequest type DeleteBucketInventoryConfigurationInput struct { _ struct{} `type:"structure"` @@ -8949,7 +9036,6 @@ func (s *DeleteBucketInventoryConfigurationInput) SetId(v string) *DeleteBucketI return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfigurationOutput type DeleteBucketInventoryConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -8964,7 +9050,6 @@ func (s DeleteBucketInventoryConfigurationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycleRequest type DeleteBucketLifecycleInput struct { _ struct{} `type:"structure"` @@ -9008,7 +9093,6 @@ func (s *DeleteBucketLifecycleInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycleOutput type DeleteBucketLifecycleOutput struct { _ struct{} `type:"structure"` } @@ -9023,7 +9107,6 @@ func (s DeleteBucketLifecycleOutput) GoString() string { return s.String() } -// Please also see 
https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfigurationRequest type DeleteBucketMetricsConfigurationInput struct { _ struct{} `type:"structure"` @@ -9083,7 +9166,6 @@ func (s *DeleteBucketMetricsConfigurationInput) SetId(v string) *DeleteBucketMet return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfigurationOutput type DeleteBucketMetricsConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -9098,7 +9180,6 @@ func (s DeleteBucketMetricsConfigurationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketOutput type DeleteBucketOutput struct { _ struct{} `type:"structure"` } @@ -9113,7 +9194,6 @@ func (s DeleteBucketOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicyRequest type DeleteBucketPolicyInput struct { _ struct{} `type:"structure"` @@ -9157,7 +9237,6 @@ func (s *DeleteBucketPolicyInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicyOutput type DeleteBucketPolicyOutput struct { _ struct{} `type:"structure"` } @@ -9172,7 +9251,6 @@ func (s DeleteBucketPolicyOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplicationRequest type DeleteBucketReplicationInput struct { _ struct{} `type:"structure"` @@ -9216,7 +9294,6 @@ func (s *DeleteBucketReplicationInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplicationOutput type DeleteBucketReplicationOutput struct { _ struct{} `type:"structure"` } @@ -9231,7 +9308,6 @@ func (s DeleteBucketReplicationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTaggingRequest type DeleteBucketTaggingInput struct { _ struct{} `type:"structure"` @@ -9275,7 +9351,6 @@ func (s *DeleteBucketTaggingInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTaggingOutput type DeleteBucketTaggingOutput struct { _ struct{} `type:"structure"` } @@ -9290,7 +9365,6 @@ func (s DeleteBucketTaggingOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsiteRequest type DeleteBucketWebsiteInput struct { _ struct{} `type:"structure"` @@ -9334,7 +9408,6 @@ func (s *DeleteBucketWebsiteInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsiteOutput type DeleteBucketWebsiteOutput struct { _ struct{} `type:"structure"` } @@ -9349,7 +9422,6 @@ func (s DeleteBucketWebsiteOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteMarkerEntry type DeleteMarkerEntry struct { _ struct{} `type:"structure"` @@ -9361,7 +9433,7 @@ type DeleteMarkerEntry struct { Key *string `min:"1" type:"string"` // Date and time the object was last modified. 
- LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + LastModified *time.Time `type:"timestamp"` Owner *Owner `type:"structure"` @@ -9409,7 +9481,6 @@ func (s *DeleteMarkerEntry) SetVersionId(v string) *DeleteMarkerEntry { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectRequest type DeleteObjectInput struct { _ struct{} `type:"structure"` @@ -9499,7 +9570,6 @@ func (s *DeleteObjectInput) SetVersionId(v string) *DeleteObjectInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectOutput type DeleteObjectOutput struct { _ struct{} `type:"structure"` @@ -9544,7 +9614,6 @@ func (s *DeleteObjectOutput) SetVersionId(v string) *DeleteObjectOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTaggingRequest type DeleteObjectTaggingInput struct { _ struct{} `type:"structure"` @@ -9612,7 +9681,6 @@ func (s *DeleteObjectTaggingInput) SetVersionId(v string) *DeleteObjectTaggingIn return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTaggingOutput type DeleteObjectTaggingOutput struct { _ struct{} `type:"structure"` @@ -9636,7 +9704,6 @@ func (s *DeleteObjectTaggingOutput) SetVersionId(v string) *DeleteObjectTaggingO return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectsRequest type DeleteObjectsInput struct { _ struct{} `type:"structure" payload:"Delete"` @@ -9719,7 +9786,6 @@ func (s *DeleteObjectsInput) SetRequestPayer(v string) *DeleteObjectsInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectsOutput type DeleteObjectsOutput struct { _ struct{} `type:"structure"` @@ -9760,7 +9826,6 @@ func (s *DeleteObjectsOutput) SetRequestCharged(v string) *DeleteObjectsOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletedObject type DeletedObject struct { _ struct{} `type:"structure"` @@ -9808,7 +9873,6 @@ func (s *DeletedObject) SetVersionId(v string) *DeletedObject { } // Container for replication destination information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Destination type Destination struct { _ struct{} `type:"structure"` @@ -9899,7 +9963,6 @@ func (s *Destination) SetStorageClass(v string) *Destination { // Describes the server-side encryption that will be applied to the restore // results. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Encryption type Encryption struct { _ struct{} `type:"structure"` @@ -9960,7 +10023,6 @@ func (s *Encryption) SetKMSKeyId(v string) *Encryption { } // Container for information regarding encryption based configuration for replicas. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/EncryptionConfiguration type EncryptionConfiguration struct { _ struct{} `type:"structure"` @@ -9984,7 +10046,32 @@ func (s *EncryptionConfiguration) SetReplicaKmsKeyID(v string) *EncryptionConfig return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Error +type EndEvent struct { + _ struct{} `locationName:"EndEvent" type:"structure"` +} + +// String returns the string representation +func (s EndEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EndEvent) GoString() string { + return s.String() +} + +// The EndEvent is and event in the SelectObjectContentEventStream group of events. +func (s *EndEvent) eventSelectObjectContentEventStream() {} + +// UnmarshalEvent unmarshals the EventStream Message into the EndEvent value. +// This method is only used internally within the SDK's EventStream handling. +func (s *EndEvent) UnmarshalEvent( + payloadUnmarshaler protocol.PayloadUnmarshaler, + msg eventstream.Message, +) error { + return nil +} + type Error struct { _ struct{} `type:"structure"` @@ -10031,7 +10118,6 @@ func (s *Error) SetVersionId(v string) *Error { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ErrorDocument type ErrorDocument struct { _ struct{} `type:"structure"` @@ -10074,7 +10160,6 @@ func (s *ErrorDocument) SetKey(v string) *ErrorDocument { } // Container for key value pair that defines the criteria for the filter rule. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/FilterRule type FilterRule struct { _ struct{} `type:"structure"` @@ -10082,6 +10167,7 @@ type FilterRule struct { // the filtering rule applies. Maximum prefix length can be up to 1,024 characters. // Overlapping prefixes and suffixes are not supported. For more information, // go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. 
Name *string `type:"string" enum:"FilterRuleName"` Value *string `type:"string"` @@ -10109,7 +10195,6 @@ func (s *FilterRule) SetValue(v string) *FilterRule { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfigurationRequest type GetBucketAccelerateConfigurationInput struct { _ struct{} `type:"structure"` @@ -10155,7 +10240,6 @@ func (s *GetBucketAccelerateConfigurationInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfigurationOutput type GetBucketAccelerateConfigurationOutput struct { _ struct{} `type:"structure"` @@ -10179,7 +10263,6 @@ func (s *GetBucketAccelerateConfigurationOutput) SetStatus(v string) *GetBucketA return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAclRequest type GetBucketAclInput struct { _ struct{} `type:"structure"` @@ -10223,7 +10306,6 @@ func (s *GetBucketAclInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAclOutput type GetBucketAclOutput struct { _ struct{} `type:"structure"` @@ -10255,7 +10337,6 @@ func (s *GetBucketAclOutput) SetOwner(v *Owner) *GetBucketAclOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfigurationRequest type GetBucketAnalyticsConfigurationInput struct { _ struct{} `type:"structure"` @@ -10315,7 +10396,6 @@ func (s *GetBucketAnalyticsConfigurationInput) SetId(v string) *GetBucketAnalyti return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfigurationOutput type GetBucketAnalyticsConfigurationOutput struct { _ struct{} `type:"structure" payload:"AnalyticsConfiguration"` @@ -10339,7 +10419,6 @@ func (s *GetBucketAnalyticsConfigurationOutput) SetAnalyticsConfiguration(v *Ana return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCorsRequest type GetBucketCorsInput struct { _ struct{} `type:"structure"` @@ -10383,7 +10462,6 @@ func (s *GetBucketCorsInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCorsOutput type GetBucketCorsOutput struct { _ struct{} `type:"structure"` @@ -10406,7 +10484,6 @@ func (s *GetBucketCorsOutput) SetCORSRules(v []*CORSRule) *GetBucketCorsOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryptionRequest type GetBucketEncryptionInput struct { _ struct{} `type:"structure"` @@ -10453,7 +10530,6 @@ func (s *GetBucketEncryptionInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryptionOutput type GetBucketEncryptionOutput struct { _ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"` @@ -10478,7 +10554,6 @@ func (s *GetBucketEncryptionOutput) SetServerSideEncryptionConfiguration(v *Serv return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfigurationRequest type GetBucketInventoryConfigurationInput struct { _ struct{} `type:"structure"` @@ -10538,7 +10613,6 @@ func (s *GetBucketInventoryConfigurationInput) SetId(v string) *GetBucketInvento return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfigurationOutput type 
GetBucketInventoryConfigurationOutput struct { _ struct{} `type:"structure" payload:"InventoryConfiguration"` @@ -10562,7 +10636,6 @@ func (s *GetBucketInventoryConfigurationOutput) SetInventoryConfiguration(v *Inv return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfigurationRequest type GetBucketLifecycleConfigurationInput struct { _ struct{} `type:"structure"` @@ -10606,7 +10679,6 @@ func (s *GetBucketLifecycleConfigurationInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfigurationOutput type GetBucketLifecycleConfigurationOutput struct { _ struct{} `type:"structure"` @@ -10629,7 +10701,6 @@ func (s *GetBucketLifecycleConfigurationOutput) SetRules(v []*LifecycleRule) *Ge return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleRequest type GetBucketLifecycleInput struct { _ struct{} `type:"structure"` @@ -10673,7 +10744,6 @@ func (s *GetBucketLifecycleInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleOutput type GetBucketLifecycleOutput struct { _ struct{} `type:"structure"` @@ -10696,7 +10766,6 @@ func (s *GetBucketLifecycleOutput) SetRules(v []*Rule) *GetBucketLifecycleOutput return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocationRequest type GetBucketLocationInput struct { _ struct{} `type:"structure"` @@ -10740,7 +10809,6 @@ func (s *GetBucketLocationInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocationOutput type GetBucketLocationOutput struct { _ struct{} `type:"structure"` @@ -10763,7 +10831,6 @@ func (s *GetBucketLocationOutput) SetLocationConstraint(v string) *GetBucketLoca return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLoggingRequest type GetBucketLoggingInput struct { _ struct{} `type:"structure"` @@ -10807,10 +10874,12 @@ func (s *GetBucketLoggingInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLoggingOutput type GetBucketLoggingOutput struct { _ struct{} `type:"structure"` + // Container for logging information. Presence of this element indicates that + // logging is enabled. Parameters TargetBucket and TargetPrefix are required + // in this case. 
LoggingEnabled *LoggingEnabled `type:"structure"` } @@ -10830,7 +10899,6 @@ func (s *GetBucketLoggingOutput) SetLoggingEnabled(v *LoggingEnabled) *GetBucket return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfigurationRequest type GetBucketMetricsConfigurationInput struct { _ struct{} `type:"structure"` @@ -10890,7 +10958,6 @@ func (s *GetBucketMetricsConfigurationInput) SetId(v string) *GetBucketMetricsCo return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfigurationOutput type GetBucketMetricsConfigurationOutput struct { _ struct{} `type:"structure" payload:"MetricsConfiguration"` @@ -10914,7 +10981,6 @@ func (s *GetBucketMetricsConfigurationOutput) SetMetricsConfiguration(v *Metrics return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfigurationRequest type GetBucketNotificationConfigurationRequest struct { _ struct{} `type:"structure"` @@ -10960,7 +11026,6 @@ func (s *GetBucketNotificationConfigurationRequest) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyRequest type GetBucketPolicyInput struct { _ struct{} `type:"structure"` @@ -11004,7 +11069,6 @@ func (s *GetBucketPolicyInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyOutput type GetBucketPolicyOutput struct { _ struct{} `type:"structure" payload:"Policy"` @@ -11028,7 +11092,6 @@ func (s *GetBucketPolicyOutput) SetPolicy(v string) *GetBucketPolicyOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplicationRequest type GetBucketReplicationInput struct { _ struct{} `type:"structure"` @@ -11072,7 +11135,6 @@ func (s *GetBucketReplicationInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplicationOutput type GetBucketReplicationOutput struct { _ struct{} `type:"structure" payload:"ReplicationConfiguration"` @@ -11097,7 +11159,6 @@ func (s *GetBucketReplicationOutput) SetReplicationConfiguration(v *ReplicationC return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPaymentRequest type GetBucketRequestPaymentInput struct { _ struct{} `type:"structure"` @@ -11141,7 +11202,6 @@ func (s *GetBucketRequestPaymentInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPaymentOutput type GetBucketRequestPaymentOutput struct { _ struct{} `type:"structure"` @@ -11165,7 +11225,6 @@ func (s *GetBucketRequestPaymentOutput) SetPayer(v string) *GetBucketRequestPaym return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTaggingRequest type GetBucketTaggingInput struct { _ struct{} `type:"structure"` @@ -11209,7 +11268,6 @@ func (s *GetBucketTaggingInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTaggingOutput type GetBucketTaggingOutput struct { _ struct{} `type:"structure"` @@ -11233,7 +11291,6 @@ func (s *GetBucketTaggingOutput) SetTagSet(v []*Tag) *GetBucketTaggingOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioningRequest type GetBucketVersioningInput struct { _ 
struct{} `type:"structure"` @@ -11277,7 +11334,6 @@ func (s *GetBucketVersioningInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioningOutput type GetBucketVersioningOutput struct { _ struct{} `type:"structure"` @@ -11312,7 +11368,6 @@ func (s *GetBucketVersioningOutput) SetStatus(v string) *GetBucketVersioningOutp return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsiteRequest type GetBucketWebsiteInput struct { _ struct{} `type:"structure"` @@ -11356,7 +11411,6 @@ func (s *GetBucketWebsiteInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsiteOutput type GetBucketWebsiteOutput struct { _ struct{} `type:"structure"` @@ -11403,7 +11457,6 @@ func (s *GetBucketWebsiteOutput) SetRoutingRules(v []*RoutingRule) *GetBucketWeb return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAclRequest type GetObjectAclInput struct { _ struct{} `type:"structure"` @@ -11483,7 +11536,6 @@ func (s *GetObjectAclInput) SetVersionId(v string) *GetObjectAclInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAclOutput type GetObjectAclOutput struct { _ struct{} `type:"structure"` @@ -11525,7 +11577,6 @@ func (s *GetObjectAclOutput) SetRequestCharged(v string) *GetObjectAclOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRequest type GetObjectInput struct { _ struct{} `type:"structure"` @@ -11538,7 +11589,7 @@ type GetObjectInput struct { // Return the object only if it has been modified since the specified time, // otherwise return a 304 (not modified). - IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"` + IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"` // Return the object only if its entity tag (ETag) is different from the one // specified, otherwise return a 304 (not modified). @@ -11546,7 +11597,7 @@ type GetObjectInput struct { // Return the object only if it has not been modified since the specified time, // otherwise return a 412 (precondition failed). - IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"` + IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"` // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -11582,7 +11633,7 @@ type GetObjectInput struct { ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"` // Sets the Expires header of the response. - ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"iso8601"` + ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp"` // Specifies the algorithm to use to when encrypting the object (e.g., AES256). 
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` @@ -11760,7 +11811,6 @@ func (s *GetObjectInput) SetVersionId(v string) *GetObjectInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectOutput type GetObjectOutput struct { _ struct{} `type:"structure" payload:"Body"` @@ -11810,7 +11860,7 @@ type GetObjectOutput struct { Expires *string `location:"header" locationName:"Expires" type:"string"` // Last modified date of the object - LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"` + LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` // A map of metadata to store with the object in S3. Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` @@ -12044,7 +12094,6 @@ func (s *GetObjectOutput) SetWebsiteRedirectLocation(v string) *GetObjectOutput return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTaggingRequest type GetObjectTaggingInput struct { _ struct{} `type:"structure"` @@ -12111,7 +12160,6 @@ func (s *GetObjectTaggingInput) SetVersionId(v string) *GetObjectTaggingInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTaggingOutput type GetObjectTaggingOutput struct { _ struct{} `type:"structure"` @@ -12143,7 +12191,6 @@ func (s *GetObjectTaggingOutput) SetVersionId(v string) *GetObjectTaggingOutput return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrentRequest type GetObjectTorrentInput struct { _ struct{} `type:"structure"` @@ -12214,7 +12261,6 @@ func (s *GetObjectTorrentInput) SetRequestPayer(v string) *GetObjectTorrentInput return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrentOutput type GetObjectTorrentOutput struct { _ struct{} `type:"structure" payload:"Body"` @@ -12247,7 +12293,6 @@ func (s *GetObjectTorrentOutput) SetRequestCharged(v string) *GetObjectTorrentOu return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GlacierJobParameters type GlacierJobParameters struct { _ struct{} `type:"structure"` @@ -12286,7 +12331,6 @@ func (s *GlacierJobParameters) SetTier(v string) *GlacierJobParameters { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Grant type Grant struct { _ struct{} `type:"structure"` @@ -12333,7 +12377,6 @@ func (s *Grant) SetPermission(v string) *Grant { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Grantee type Grantee struct { _ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` @@ -12408,7 +12451,6 @@ func (s *Grantee) SetURI(v string) *Grantee { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucketRequest type HeadBucketInput struct { _ struct{} `type:"structure"` @@ -12452,7 +12494,6 @@ func (s *HeadBucketInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucketOutput type HeadBucketOutput struct { _ struct{} `type:"structure"` } @@ -12467,7 +12508,6 @@ func (s HeadBucketOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObjectRequest type HeadObjectInput struct { _ struct{} 
`type:"structure"` @@ -12480,7 +12520,7 @@ type HeadObjectInput struct { // Return the object only if it has been modified since the specified time, // otherwise return a 304 (not modified). - IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"` + IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"` // Return the object only if its entity tag (ETag) is different from the one // specified, otherwise return a 304 (not modified). @@ -12488,7 +12528,7 @@ type HeadObjectInput struct { // Return the object only if it has not been modified since the specified time, // otherwise return a 412 (precondition failed). - IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"` + IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"` // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -12649,7 +12689,6 @@ func (s *HeadObjectInput) SetVersionId(v string) *HeadObjectInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObjectOutput type HeadObjectOutput struct { _ struct{} `type:"structure"` @@ -12693,7 +12732,7 @@ type HeadObjectOutput struct { Expires *string `location:"header" locationName:"Expires" type:"string"` // Last modified date of the object - LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"` + LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` // A map of metadata to store with the object in S3. Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` @@ -12906,7 +12945,6 @@ func (s *HeadObjectOutput) SetWebsiteRedirectLocation(v string) *HeadObjectOutpu return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/IndexDocument type IndexDocument struct { _ struct{} `type:"structure"` @@ -12948,7 +12986,6 @@ func (s *IndexDocument) SetSuffix(v string) *IndexDocument { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Initiator type Initiator struct { _ struct{} `type:"structure"` @@ -12983,12 +13020,18 @@ func (s *Initiator) SetID(v string) *Initiator { } // Describes the serialization format of the object. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InputSerialization type InputSerialization struct { _ struct{} `type:"structure"` // Describes the serialization of a CSV-encoded object. CSV *CSVInput `type:"structure"` + + // Specifies object's compression format. Valid values: NONE, GZIP, BZIP2. Default + // Value: NONE. + CompressionType *string `type:"string" enum:"CompressionType"` + + // Specifies JSON as object's input serialization format. + JSON *JSONInput `type:"structure"` } // String returns the string representation @@ -13007,7 +13050,18 @@ func (s *InputSerialization) SetCSV(v *CSVInput) *InputSerialization { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryConfiguration +// SetCompressionType sets the CompressionType field's value. +func (s *InputSerialization) SetCompressionType(v string) *InputSerialization { + s.CompressionType = &v + return s +} + +// SetJSON sets the JSON field's value. 
+func (s *InputSerialization) SetJSON(v *JSONInput) *InputSerialization { + s.JSON = v + return s +} + type InventoryConfiguration struct { _ struct{} `type:"structure"` @@ -13136,7 +13190,6 @@ func (s *InventoryConfiguration) SetSchedule(v *InventorySchedule) *InventoryCon return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryDestination type InventoryDestination struct { _ struct{} `type:"structure"` @@ -13183,7 +13236,6 @@ func (s *InventoryDestination) SetS3BucketDestination(v *InventoryS3BucketDestin // Contains the type of server-side encryption used to encrypt the inventory // results. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryEncryption type InventoryEncryption struct { _ struct{} `type:"structure"` @@ -13231,7 +13283,6 @@ func (s *InventoryEncryption) SetSSES3(v *SSES3) *InventoryEncryption { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryFilter type InventoryFilter struct { _ struct{} `type:"structure"` @@ -13270,7 +13321,6 @@ func (s *InventoryFilter) SetPrefix(v string) *InventoryFilter { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryS3BucketDestination type InventoryS3BucketDestination struct { _ struct{} `type:"structure"` @@ -13364,7 +13414,6 @@ func (s *InventoryS3BucketDestination) SetPrefix(v string) *InventoryS3BucketDes return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventorySchedule type InventorySchedule struct { _ struct{} `type:"structure"` @@ -13403,8 +13452,53 @@ func (s *InventorySchedule) SetFrequency(v string) *InventorySchedule { return s } +type JSONInput struct { + _ struct{} `type:"structure"` + + // The type of JSON. Valid values: Document, Lines. + Type *string `type:"string" enum:"JSONType"` +} + +// String returns the string representation +func (s JSONInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s JSONInput) GoString() string { + return s.String() +} + +// SetType sets the Type field's value. +func (s *JSONInput) SetType(v string) *JSONInput { + s.Type = &v + return s +} + +type JSONOutput struct { + _ struct{} `type:"structure"` + + // The value used to separate individual records in the output. + RecordDelimiter *string `type:"string"` +} + +// String returns the string representation +func (s JSONOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s JSONOutput) GoString() string { + return s.String() +} + +// SetRecordDelimiter sets the RecordDelimiter field's value. +func (s *JSONOutput) SetRecordDelimiter(v string) *JSONOutput { + s.RecordDelimiter = &v + return s +} + // Container for object key name prefix and suffix filtering rules. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/S3KeyFilter type KeyFilter struct { _ struct{} `type:"structure"` @@ -13430,7 +13524,6 @@ func (s *KeyFilter) SetFilterRules(v []*FilterRule) *KeyFilter { } // Container for specifying the AWS Lambda notification configuration. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LambdaFunctionConfiguration type LambdaFunctionConfiguration struct { _ struct{} `type:"structure"` @@ -13439,6 +13532,7 @@ type LambdaFunctionConfiguration struct { // Container for object key name filtering rules. 
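The new `JSONInput`/`JSONOutput` types and the `CompressionType` field slot into `InputSerialization` and `OutputSerialization` (whose matching `JSON` field and setter are added further down in this patch). A short sketch using the generated fluent setters; the `CompressionTypeGzip` and `JSONTypeLines` constants are assumed to be the enum values generated alongside this patch:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Query gzipped JSON Lines input and emit newline-delimited JSON records.
	in := new(s3.InputSerialization).
		SetCompressionType(s3.CompressionTypeGzip).
		SetJSON(new(s3.JSONInput).SetType(s3.JSONTypeLines))

	out := new(s3.OutputSerialization).
		SetJSON(new(s3.JSONOutput).SetRecordDelimiter("\n"))

	fmt.Println(in, out) // both types pretty-print via awsutil.Prettify
}
```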
For information about key // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. Filter *NotificationConfigurationFilter `type:"structure"` // Optional unique identifier for configurations in a notification configuration. @@ -13502,7 +13596,6 @@ func (s *LambdaFunctionConfiguration) SetLambdaFunctionArn(v string) *LambdaFunc return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleConfiguration type LifecycleConfiguration struct { _ struct{} `type:"structure"` @@ -13549,7 +13642,6 @@ func (s *LifecycleConfiguration) SetRules(v []*Rule) *LifecycleConfiguration { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleExpiration type LifecycleExpiration struct { _ struct{} `type:"structure"` @@ -13596,7 +13688,6 @@ func (s *LifecycleExpiration) SetExpiredObjectDeleteMarker(v bool) *LifecycleExp return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRule type LifecycleRule struct { _ struct{} `type:"structure"` @@ -13720,7 +13811,6 @@ func (s *LifecycleRule) SetTransitions(v []*Transition) *LifecycleRule { // This is used in a Lifecycle Rule Filter to apply a logical AND to two or // more predicates. The Lifecycle Rule will apply to any object matching all // of the predicates configured inside the And operator. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRuleAndOperator type LifecycleRuleAndOperator struct { _ struct{} `type:"structure"` @@ -13775,7 +13865,6 @@ func (s *LifecycleRuleAndOperator) SetTags(v []*Tag) *LifecycleRuleAndOperator { // The Filter is used to identify objects that a Lifecycle Rule applies to. // A Filter must have exactly one of Prefix, Tag, or And specified. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRuleFilter type LifecycleRuleFilter struct { _ struct{} `type:"structure"` @@ -13839,7 +13928,6 @@ func (s *LifecycleRuleFilter) SetTag(v *Tag) *LifecycleRuleFilter { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurationsRequest type ListBucketAnalyticsConfigurationsInput struct { _ struct{} `type:"structure"` @@ -13895,7 +13983,6 @@ func (s *ListBucketAnalyticsConfigurationsInput) SetContinuationToken(v string) return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurationsOutput type ListBucketAnalyticsConfigurationsOutput struct { _ struct{} `type:"structure"` @@ -13950,7 +14037,6 @@ func (s *ListBucketAnalyticsConfigurationsOutput) SetNextContinuationToken(v str return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurationsRequest type ListBucketInventoryConfigurationsInput struct { _ struct{} `type:"structure"` @@ -14008,7 +14094,6 @@ func (s *ListBucketInventoryConfigurationsInput) SetContinuationToken(v string) return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurationsOutput type ListBucketInventoryConfigurationsOutput struct { _ struct{} `type:"structure"` @@ -14063,7 +14148,6 @@ func (s *ListBucketInventoryConfigurationsOutput) SetNextContinuationToken(v str return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurationsRequest type ListBucketMetricsConfigurationsInput struct { _ struct{} `type:"structure"` @@ -14121,7 +14205,6 @@ func (s *ListBucketMetricsConfigurationsInput) SetContinuationToken(v string) *L return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurationsOutput type ListBucketMetricsConfigurationsOutput struct { _ struct{} `type:"structure"` @@ -14178,7 +14261,6 @@ func (s *ListBucketMetricsConfigurationsOutput) SetNextContinuationToken(v strin return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketsInput type ListBucketsInput struct { _ struct{} `type:"structure"` } @@ -14193,7 +14275,6 @@ func (s ListBucketsInput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketsOutput type ListBucketsOutput struct { _ struct{} `type:"structure"` @@ -14224,7 +14305,6 @@ func (s *ListBucketsOutput) SetOwner(v *Owner) *ListBucketsOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploadsRequest type ListMultipartUploadsInput struct { _ struct{} `type:"structure"` @@ -14333,7 +14413,6 @@ func (s *ListMultipartUploadsInput) SetUploadIdMarker(v string) *ListMultipartUp return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploadsOutput type ListMultipartUploadsOutput struct { _ struct{} `type:"structure"` @@ -14467,7 +14546,6 @@ func (s *ListMultipartUploadsOutput) SetUploads(v []*MultipartUpload) *ListMulti return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersionsRequest type ListObjectVersionsInput struct { _ struct{} `type:"structure"` @@ -14571,7 +14649,6 @@ func (s *ListObjectVersionsInput) SetVersionIdMarker(v string) *ListObjectVersio return s } -// Please also see 
https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersionsOutput type ListObjectVersionsOutput struct { _ struct{} `type:"structure"` @@ -14699,7 +14776,6 @@ func (s *ListObjectVersionsOutput) SetVersions(v []*ObjectVersion) *ListObjectVe return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsRequest type ListObjectsInput struct { _ struct{} `type:"structure"` @@ -14805,7 +14881,6 @@ func (s *ListObjectsInput) SetRequestPayer(v string) *ListObjectsInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsOutput type ListObjectsOutput struct { _ struct{} `type:"structure"` @@ -14910,7 +14985,6 @@ func (s *ListObjectsOutput) SetPrefix(v string) *ListObjectsOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2Request type ListObjectsV2Input struct { _ struct{} `type:"structure"` @@ -15036,7 +15110,6 @@ func (s *ListObjectsV2Input) SetStartAfter(v string) *ListObjectsV2Input { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2Output type ListObjectsV2Output struct { _ struct{} `type:"structure"` @@ -15170,7 +15243,6 @@ func (s *ListObjectsV2Output) SetStartAfter(v string) *ListObjectsV2Output { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListPartsRequest type ListPartsInput struct { _ struct{} `type:"structure"` @@ -15274,12 +15346,11 @@ func (s *ListPartsInput) SetUploadId(v string) *ListPartsInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListPartsOutput type ListPartsOutput struct { _ struct{} `type:"structure"` // Date when multipart upload will become eligible for abort operation by lifecycle. - AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp" timestampFormat:"rfc822"` + AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"` // Id of the lifecycle rule that makes a multipart upload eligible for abort // operation. @@ -15425,7 +15496,6 @@ func (s *ListPartsOutput) SetUploadId(v string) *ListPartsOutput { } // Describes an S3 location that will receive the results of the restore request. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/S3Location type Location struct { _ struct{} `type:"structure"` @@ -15553,7 +15623,9 @@ func (s *Location) SetUserMetadata(v []*MetadataEntry) *Location { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LoggingEnabled +// Container for logging information. Presence of this element indicates that +// logging is enabled. Parameters TargetBucket and TargetPrefix are required +// in this case. type LoggingEnabled struct { _ struct{} `type:"structure"` @@ -15563,13 +15635,17 @@ type LoggingEnabled struct { // to deliver their logs to the same target bucket. In this case you should // choose a different TargetPrefix for each source bucket so that the delivered // log files can be distinguished by key. - TargetBucket *string `type:"string"` + // + // TargetBucket is a required field + TargetBucket *string `type:"string" required:"true"` TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"` // This element lets you specify a prefix for the keys that the log files will // be stored under. 
- TargetPrefix *string `type:"string"` + // + // TargetPrefix is a required field + TargetPrefix *string `type:"string" required:"true"` } // String returns the string representation @@ -15585,6 +15661,12 @@ func (s LoggingEnabled) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *LoggingEnabled) Validate() error { invalidParams := request.ErrInvalidParams{Context: "LoggingEnabled"} + if s.TargetBucket == nil { + invalidParams.Add(request.NewErrParamRequired("TargetBucket")) + } + if s.TargetPrefix == nil { + invalidParams.Add(request.NewErrParamRequired("TargetPrefix")) + } if s.TargetGrants != nil { for i, v := range s.TargetGrants { if v == nil { @@ -15621,7 +15703,6 @@ func (s *LoggingEnabled) SetTargetPrefix(v string) *LoggingEnabled { } // A metadata key-value pair to store with an object. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetadataEntry type MetadataEntry struct { _ struct{} `type:"structure"` @@ -15652,7 +15733,6 @@ func (s *MetadataEntry) SetValue(v string) *MetadataEntry { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsAndOperator type MetricsAndOperator struct { _ struct{} `type:"structure"` @@ -15705,7 +15785,6 @@ func (s *MetricsAndOperator) SetTags(v []*Tag) *MetricsAndOperator { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsConfiguration type MetricsConfiguration struct { _ struct{} `type:"structure"` @@ -15760,7 +15839,6 @@ func (s *MetricsConfiguration) SetId(v string) *MetricsConfiguration { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsFilter type MetricsFilter struct { _ struct{} `type:"structure"` @@ -15824,12 +15902,11 @@ func (s *MetricsFilter) SetTag(v *Tag) *MetricsFilter { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MultipartUpload type MultipartUpload struct { _ struct{} `type:"structure"` // Date and time at which the multipart upload was initiated. - Initiated *time.Time `type:"timestamp" timestampFormat:"iso8601"` + Initiated *time.Time `type:"timestamp"` // Identifies who initiated the multipart upload. Initiator *Initiator `type:"structure"` @@ -15897,14 +15974,14 @@ func (s *MultipartUpload) SetUploadId(v string) *MultipartUpload { // configuration action on a bucket that has versioning enabled (or suspended) // to request that Amazon S3 delete noncurrent object versions at a specific // period in the object's lifetime. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NoncurrentVersionExpiration type NoncurrentVersionExpiration struct { _ struct{} `type:"structure"` // Specifies the number of days an object is noncurrent before Amazon S3 can // perform the associated action. For information about the noncurrent days // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent - // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) + // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in + // the Amazon Simple Storage Service Developer Guide. NoncurrentDays *int64 `type:"integer"` } @@ -15925,18 +16002,19 @@ func (s *NoncurrentVersionExpiration) SetNoncurrentDays(v int64) *NoncurrentVers } // Container for the transition rule that describes when noncurrent objects -// transition to the STANDARD_IA or GLACIER storage class. 
If your bucket is -// versioning-enabled (or versioning is suspended), you can set this action -// to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA -// or GLACIER storage class at a specific period in the object's lifetime. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NoncurrentVersionTransition +// transition to the STANDARD_IA, ONEZONE_IA or GLACIER storage class. If your +// bucket is versioning-enabled (or versioning is suspended), you can set this +// action to request that Amazon S3 transition noncurrent object versions to +// the STANDARD_IA, ONEZONE_IA or GLACIER storage class at a specific period +// in the object's lifetime. type NoncurrentVersionTransition struct { _ struct{} `type:"structure"` // Specifies the number of days an object is noncurrent before Amazon S3 can // perform the associated action. For information about the noncurrent days // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent - // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) + // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in + // the Amazon Simple Storage Service Developer Guide. NoncurrentDays *int64 `type:"integer"` // The class of storage used to store the object. @@ -15967,7 +16045,6 @@ func (s *NoncurrentVersionTransition) SetStorageClass(v string) *NoncurrentVersi // Container for specifying the notification configuration of the bucket. If // this element is empty, notifications are turned off on the bucket. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfiguration type NotificationConfiguration struct { _ struct{} `type:"structure"` @@ -16046,7 +16123,6 @@ func (s *NotificationConfiguration) SetTopicConfigurations(v []*TopicConfigurati return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfigurationDeprecated type NotificationConfigurationDeprecated struct { _ struct{} `type:"structure"` @@ -16087,7 +16163,7 @@ func (s *NotificationConfigurationDeprecated) SetTopicConfiguration(v *TopicConf // Container for object key name filtering rules. For information about key // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfigurationFilter +// in the Amazon Simple Storage Service Developer Guide. 
type NotificationConfigurationFilter struct { _ struct{} `type:"structure"` @@ -16111,7 +16187,6 @@ func (s *NotificationConfigurationFilter) SetKey(v *KeyFilter) *NotificationConf return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Object type Object struct { _ struct{} `type:"structure"` @@ -16119,7 +16194,7 @@ type Object struct { Key *string `min:"1" type:"string"` - LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + LastModified *time.Time `type:"timestamp"` Owner *Owner `type:"structure"` @@ -16175,7 +16250,6 @@ func (s *Object) SetStorageClass(v string) *Object { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectIdentifier type ObjectIdentifier struct { _ struct{} `type:"structure"` @@ -16226,7 +16300,6 @@ func (s *ObjectIdentifier) SetVersionId(v string) *ObjectIdentifier { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectVersion type ObjectVersion struct { _ struct{} `type:"structure"` @@ -16240,7 +16313,7 @@ type ObjectVersion struct { Key *string `min:"1" type:"string"` // Date and time the object was last modified. - LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + LastModified *time.Time `type:"timestamp"` Owner *Owner `type:"structure"` @@ -16313,7 +16386,6 @@ func (s *ObjectVersion) SetVersionId(v string) *ObjectVersion { } // Describes the location where the restore job's output is stored. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/OutputLocation type OutputLocation struct { _ struct{} `type:"structure"` @@ -16353,12 +16425,14 @@ func (s *OutputLocation) SetS3(v *Location) *OutputLocation { } // Describes how results of the Select job are serialized. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/OutputSerialization type OutputSerialization struct { _ struct{} `type:"structure"` // Describes the serialization of CSV-encoded Select results. CSV *CSVOutput `type:"structure"` + + // Specifies JSON as request's output serialization format. + JSON *JSONOutput `type:"structure"` } // String returns the string representation @@ -16377,7 +16451,12 @@ func (s *OutputSerialization) SetCSV(v *CSVOutput) *OutputSerialization { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Owner +// SetJSON sets the JSON field's value. +func (s *OutputSerialization) SetJSON(v *JSONOutput) *OutputSerialization { + s.JSON = v + return s +} + type Owner struct { _ struct{} `type:"structure"` @@ -16408,7 +16487,6 @@ func (s *Owner) SetID(v string) *Owner { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Part type Part struct { _ struct{} `type:"structure"` @@ -16416,7 +16494,7 @@ type Part struct { ETag *string `type:"string"` // Date and time at which the part was uploaded. - LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + LastModified *time.Time `type:"timestamp"` // Part number identifying the part. This is a positive integer between 1 and // 10,000. @@ -16460,7 +16538,87 @@ func (s *Part) SetSize(v int64) *Part { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfigurationRequest +type Progress struct { + _ struct{} `type:"structure"` + + // Current number of uncompressed object bytes processed. + BytesProcessed *int64 `type:"long"` + + // Current number of bytes of records payload data returned. 
+ BytesReturned *int64 `type:"long"` + + // Current number of object bytes scanned. + BytesScanned *int64 `type:"long"` +} + +// String returns the string representation +func (s Progress) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Progress) GoString() string { + return s.String() +} + +// SetBytesProcessed sets the BytesProcessed field's value. +func (s *Progress) SetBytesProcessed(v int64) *Progress { + s.BytesProcessed = &v + return s +} + +// SetBytesReturned sets the BytesReturned field's value. +func (s *Progress) SetBytesReturned(v int64) *Progress { + s.BytesReturned = &v + return s +} + +// SetBytesScanned sets the BytesScanned field's value. +func (s *Progress) SetBytesScanned(v int64) *Progress { + s.BytesScanned = &v + return s +} + +type ProgressEvent struct { + _ struct{} `locationName:"ProgressEvent" type:"structure" payload:"Details"` + + // The Progress event details. + Details *Progress `locationName:"Details" type:"structure"` +} + +// String returns the string representation +func (s ProgressEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProgressEvent) GoString() string { + return s.String() +} + +// SetDetails sets the Details field's value. +func (s *ProgressEvent) SetDetails(v *Progress) *ProgressEvent { + s.Details = v + return s +} + +// The ProgressEvent is an event in the SelectObjectContentEventStream group of events. +func (s *ProgressEvent) eventSelectObjectContentEventStream() {} + +// UnmarshalEvent unmarshals the EventStream Message into the ProgressEvent value. +// This method is only used internally within the SDK's EventStream handling. +func (s *ProgressEvent) UnmarshalEvent( + payloadUnmarshaler protocol.PayloadUnmarshaler, + msg eventstream.Message, +) error { + if err := payloadUnmarshaler.UnmarshalPayload( + bytes.NewReader(msg.Payload), s, + ); err != nil { + return err + } + return nil +} + type PutBucketAccelerateConfigurationInput struct { _ struct{} `type:"structure" payload:"AccelerateConfiguration"` @@ -16520,7 +16678,6 @@ func (s *PutBucketAccelerateConfigurationInput) getBucket() (v string) { return *s.Bucket } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfigurationOutput type PutBucketAccelerateConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -16535,7 +16692,6 @@ func (s PutBucketAccelerateConfigurationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAclRequest type PutBucketAclInput struct { _ struct{} `type:"structure" payload:"AccessControlPolicy"` @@ -16647,7 +16803,6 @@ func (s *PutBucketAclInput) SetGrantWriteACP(v string) *PutBucketAclInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAclOutput type PutBucketAclOutput struct { _ struct{} `type:"structure"` } @@ -16662,7 +16817,6 @@ func (s PutBucketAclOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfigurationRequest type PutBucketAnalyticsConfigurationInput struct { _ struct{} `type:"structure" payload:"AnalyticsConfiguration"` @@ -16741,7 +16895,6 @@ func (s *PutBucketAnalyticsConfigurationInput) SetId(v string) *PutBucketAnalyti return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfigurationOutput type
PutBucketAnalyticsConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -16756,7 +16909,6 @@ func (s PutBucketAnalyticsConfigurationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCorsRequest type PutBucketCorsInput struct { _ struct{} `type:"structure" payload:"CORSConfiguration"` @@ -16817,7 +16969,6 @@ func (s *PutBucketCorsInput) SetCORSConfiguration(v *CORSConfiguration) *PutBuck return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCorsOutput type PutBucketCorsOutput struct { _ struct{} `type:"structure"` } @@ -16832,7 +16983,6 @@ func (s PutBucketCorsOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryptionRequest type PutBucketEncryptionInput struct { _ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"` @@ -16899,7 +17049,6 @@ func (s *PutBucketEncryptionInput) SetServerSideEncryptionConfiguration(v *Serve return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryptionOutput type PutBucketEncryptionOutput struct { _ struct{} `type:"structure"` } @@ -16914,7 +17063,6 @@ func (s PutBucketEncryptionOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfigurationRequest type PutBucketInventoryConfigurationInput struct { _ struct{} `type:"structure" payload:"InventoryConfiguration"` @@ -16993,7 +17141,6 @@ func (s *PutBucketInventoryConfigurationInput) SetInventoryConfiguration(v *Inve return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfigurationOutput type PutBucketInventoryConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -17008,7 +17155,6 @@ func (s PutBucketInventoryConfigurationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfigurationRequest type PutBucketLifecycleConfigurationInput struct { _ struct{} `type:"structure" payload:"LifecycleConfiguration"` @@ -17065,7 +17211,6 @@ func (s *PutBucketLifecycleConfigurationInput) SetLifecycleConfiguration(v *Buck return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfigurationOutput type PutBucketLifecycleConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -17080,7 +17225,6 @@ func (s PutBucketLifecycleConfigurationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleRequest type PutBucketLifecycleInput struct { _ struct{} `type:"structure" payload:"LifecycleConfiguration"` @@ -17137,7 +17281,6 @@ func (s *PutBucketLifecycleInput) SetLifecycleConfiguration(v *LifecycleConfigur return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleOutput type PutBucketLifecycleOutput struct { _ struct{} `type:"structure"` } @@ -17152,7 +17295,6 @@ func (s PutBucketLifecycleOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLoggingRequest type PutBucketLoggingInput struct { _ struct{} `type:"structure" payload:"BucketLoggingStatus"` @@ -17213,7 +17355,6 @@ func (s *PutBucketLoggingInput) SetBucketLoggingStatus(v *BucketLoggingStatus) * return s } -// Please 
also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLoggingOutput type PutBucketLoggingOutput struct { _ struct{} `type:"structure"` } @@ -17228,7 +17369,6 @@ func (s PutBucketLoggingOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfigurationRequest type PutBucketMetricsConfigurationInput struct { _ struct{} `type:"structure" payload:"MetricsConfiguration"` @@ -17307,7 +17447,6 @@ func (s *PutBucketMetricsConfigurationInput) SetMetricsConfiguration(v *MetricsC return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfigurationOutput type PutBucketMetricsConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -17322,7 +17461,6 @@ func (s PutBucketMetricsConfigurationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfigurationRequest type PutBucketNotificationConfigurationInput struct { _ struct{} `type:"structure" payload:"NotificationConfiguration"` @@ -17386,7 +17524,6 @@ func (s *PutBucketNotificationConfigurationInput) SetNotificationConfiguration(v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfigurationOutput type PutBucketNotificationConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -17401,7 +17538,6 @@ func (s PutBucketNotificationConfigurationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationRequest type PutBucketNotificationInput struct { _ struct{} `type:"structure" payload:"NotificationConfiguration"` @@ -17457,7 +17593,6 @@ func (s *PutBucketNotificationInput) SetNotificationConfiguration(v *Notificatio return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationOutput type PutBucketNotificationOutput struct { _ struct{} `type:"structure"` } @@ -17472,7 +17607,6 @@ func (s PutBucketNotificationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicyRequest type PutBucketPolicyInput struct { _ struct{} `type:"structure" payload:"Policy"` @@ -17540,7 +17674,6 @@ func (s *PutBucketPolicyInput) SetPolicy(v string) *PutBucketPolicyInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicyOutput type PutBucketPolicyOutput struct { _ struct{} `type:"structure"` } @@ -17555,7 +17688,6 @@ func (s PutBucketPolicyOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplicationRequest type PutBucketReplicationInput struct { _ struct{} `type:"structure" payload:"ReplicationConfiguration"` @@ -17619,7 +17751,6 @@ func (s *PutBucketReplicationInput) SetReplicationConfiguration(v *ReplicationCo return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplicationOutput type PutBucketReplicationOutput struct { _ struct{} `type:"structure"` } @@ -17634,7 +17765,6 @@ func (s PutBucketReplicationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPaymentRequest type PutBucketRequestPaymentInput struct { _ struct{} `type:"structure" payload:"RequestPaymentConfiguration"` @@ -17695,7 +17825,6 @@ func (s 
*PutBucketRequestPaymentInput) SetRequestPaymentConfiguration(v *Request return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPaymentOutput type PutBucketRequestPaymentOutput struct { _ struct{} `type:"structure"` } @@ -17710,7 +17839,6 @@ func (s PutBucketRequestPaymentOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTaggingRequest type PutBucketTaggingInput struct { _ struct{} `type:"structure" payload:"Tagging"` @@ -17771,7 +17899,6 @@ func (s *PutBucketTaggingInput) SetTagging(v *Tagging) *PutBucketTaggingInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTaggingOutput type PutBucketTaggingOutput struct { _ struct{} `type:"structure"` } @@ -17786,7 +17913,6 @@ func (s PutBucketTaggingOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioningRequest type PutBucketVersioningInput struct { _ struct{} `type:"structure" payload:"VersioningConfiguration"` @@ -17852,7 +17978,6 @@ func (s *PutBucketVersioningInput) SetVersioningConfiguration(v *VersioningConfi return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioningOutput type PutBucketVersioningOutput struct { _ struct{} `type:"structure"` } @@ -17867,7 +17992,6 @@ func (s PutBucketVersioningOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsiteRequest type PutBucketWebsiteInput struct { _ struct{} `type:"structure" payload:"WebsiteConfiguration"` @@ -17928,7 +18052,6 @@ func (s *PutBucketWebsiteInput) SetWebsiteConfiguration(v *WebsiteConfiguration) return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsiteOutput type PutBucketWebsiteOutput struct { _ struct{} `type:"structure"` } @@ -17943,7 +18066,6 @@ func (s PutBucketWebsiteOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAclRequest type PutObjectAclInput struct { _ struct{} `type:"structure" payload:"AccessControlPolicy"` @@ -18091,7 +18213,6 @@ func (s *PutObjectAclInput) SetVersionId(v string) *PutObjectAclInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAclOutput type PutObjectAclOutput struct { _ struct{} `type:"structure"` @@ -18116,7 +18237,6 @@ func (s *PutObjectAclOutput) SetRequestCharged(v string) *PutObjectAclOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRequest type PutObjectInput struct { _ struct{} `type:"structure" payload:"Body"` @@ -18156,7 +18276,7 @@ type PutObjectInput struct { ContentType *string `location:"header" locationName:"Content-Type" type:"string"` // The date and time at which the object is no longer cacheable. - Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. 
GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` @@ -18420,7 +18540,6 @@ func (s *PutObjectInput) SetWebsiteRedirectLocation(v string) *PutObjectInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectOutput type PutObjectOutput struct { _ struct{} `type:"structure"` @@ -18515,7 +18634,6 @@ func (s *PutObjectOutput) SetVersionId(v string) *PutObjectOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTaggingRequest type PutObjectTaggingInput struct { _ struct{} `type:"structure" payload:"Tagging"` @@ -18599,7 +18717,6 @@ func (s *PutObjectTaggingInput) SetVersionId(v string) *PutObjectTaggingInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTaggingOutput type PutObjectTaggingOutput struct { _ struct{} `type:"structure"` @@ -18624,7 +18741,6 @@ func (s *PutObjectTaggingOutput) SetVersionId(v string) *PutObjectTaggingOutput // Container for specifying an configuration when you want Amazon S3 to publish // events to an Amazon Simple Queue Service (Amazon SQS) queue. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/QueueConfiguration type QueueConfiguration struct { _ struct{} `type:"structure"` @@ -18633,6 +18749,7 @@ type QueueConfiguration struct { // Container for object key name filtering rules. For information about key // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. Filter *NotificationConfigurationFilter `type:"structure"` // Optional unique identifier for configurations in a notification configuration. @@ -18696,7 +18813,6 @@ func (s *QueueConfiguration) SetQueueArn(v string) *QueueConfiguration { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/QueueConfigurationDeprecated type QueueConfigurationDeprecated struct { _ struct{} `type:"structure"` @@ -18746,7 +18862,45 @@ func (s *QueueConfigurationDeprecated) SetQueue(v string) *QueueConfigurationDep return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Redirect +type RecordsEvent struct { + _ struct{} `locationName:"RecordsEvent" type:"structure" payload:"Payload"` + + // The byte array of partial, one or more result records. + // + // Payload is automatically base64 encoded/decoded by the SDK. + Payload []byte `type:"blob"` +} + +// String returns the string representation +func (s RecordsEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RecordsEvent) GoString() string { + return s.String() +} + +// SetPayload sets the Payload field's value. +func (s *RecordsEvent) SetPayload(v []byte) *RecordsEvent { + s.Payload = v + return s +} + +// The RecordsEvent is and event in the SelectObjectContentEventStream group of events. +func (s *RecordsEvent) eventSelectObjectContentEventStream() {} + +// UnmarshalEvent unmarshals the EventStream Message into the RecordsEvent value. +// This method is only used internally within the SDK's EventStream handling. 
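+//
+// Note that, unlike the Progress and Stats events, the Records payload is an
+// opaque byte stream rather than an XML document, so this implementation
+// copies the message payload directly instead of running it through the
+// payload unmarshaler.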
+func (s *RecordsEvent) UnmarshalEvent( + payloadUnmarshaler protocol.PayloadUnmarshaler, + msg eventstream.Message, +) error { + s.Payload = make([]byte, len(msg.Payload)) + copy(s.Payload, msg.Payload) + return nil +} + type Redirect struct { _ struct{} `type:"structure"` @@ -18815,7 +18969,6 @@ func (s *Redirect) SetReplaceKeyWith(v string) *Redirect { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RedirectAllRequestsTo type RedirectAllRequestsTo struct { _ struct{} `type:"structure"` @@ -18866,7 +19019,6 @@ func (s *RedirectAllRequestsTo) SetProtocol(v string) *RedirectAllRequestsTo { // Container for replication rules. You can add as many as 1,000 rules. Total // replication configuration size can be up to 2 MB. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationConfiguration type ReplicationConfiguration struct { _ struct{} `type:"structure"` @@ -18932,7 +19084,6 @@ func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationCo } // Container for information about a particular replication rule. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationRule type ReplicationRule struct { _ struct{} `type:"structure"` @@ -19029,7 +19180,6 @@ func (s *ReplicationRule) SetStatus(v string) *ReplicationRule { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RequestPaymentConfiguration type RequestPaymentConfiguration struct { _ struct{} `type:"structure"` @@ -19068,7 +19218,30 @@ func (s *RequestPaymentConfiguration) SetPayer(v string) *RequestPaymentConfigur return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObjectRequest +type RequestProgress struct { + _ struct{} `type:"structure"` + + // Specifies whether periodic QueryProgress frames should be sent. Valid values: + // TRUE, FALSE. Default value: FALSE. + Enabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s RequestProgress) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestProgress) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. +func (s *RequestProgress) SetEnabled(v bool) *RequestProgress { + s.Enabled = &v + return s +} + type RestoreObjectInput struct { _ struct{} `type:"structure" payload:"RestoreRequest"` @@ -19161,7 +19334,6 @@ func (s *RestoreObjectInput) SetVersionId(v string) *RestoreObjectInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObjectOutput type RestoreObjectOutput struct { _ struct{} `type:"structure"` @@ -19197,7 +19369,6 @@ func (s *RestoreObjectOutput) SetRestoreOutputPath(v string) *RestoreObjectOutpu } // Container for restore job parameters. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreRequest type RestoreRequest struct { _ struct{} `type:"structure"` @@ -19302,7 +19473,6 @@ func (s *RestoreRequest) SetType(v string) *RestoreRequest { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RoutingRule type RoutingRule struct { _ struct{} `type:"structure"` @@ -19355,7 +19525,6 @@ func (s *RoutingRule) SetRedirect(v *Redirect) *RoutingRule { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Rule type Rule struct { _ struct{} `type:"structure"` @@ -19376,10 +19545,11 @@ type Rule struct { NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` // Container for the transition rule that describes when noncurrent objects - // transition to the STANDARD_IA or GLACIER storage class. If your bucket is - // versioning-enabled (or versioning is suspended), you can set this action - // to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA - // or GLACIER storage class at a specific period in the object's lifetime. + // transition to the STANDARD_IA, ONEZONE_IA or GLACIER storage class. If your + // bucket is versioning-enabled (or versioning is suspended), you can set this + // action to request that Amazon S3 transition noncurrent object versions to + // the STANDARD_IA, ONEZONE_IA or GLACIER storage class at a specific period + // in the object's lifetime. NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"` // Prefix identifying one or more objects to which the rule applies. @@ -19471,7 +19641,6 @@ func (s *Rule) SetTransition(v *Transition) *Rule { } // Specifies the use of SSE-KMS to encrypt delievered Inventory reports. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SSEKMS type SSEKMS struct { _ struct{} `locationName:"SSE-KMS" type:"structure"` @@ -19512,7 +19681,6 @@ func (s *SSEKMS) SetKeyId(v string) *SSEKMS { } // Specifies the use of SSE-S3 to encrypt delievered Inventory reports. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SSES3 type SSES3 struct { _ struct{} `locationName:"SSE-S3" type:"structure"` } @@ -19527,8 +19695,440 @@ func (s SSES3) GoString() string { return s.String() } +// SelectObjectContentEventStream provides handling of EventStreams for +// the SelectObjectContent API. +// +// Use this type to receive SelectObjectContentEventStream events. The events +// can be read from the Events channel member. +// +// The events that can be received are: +// +// * ContinuationEvent +// * EndEvent +// * ProgressEvent +// * RecordsEvent +// * StatsEvent +type SelectObjectContentEventStream struct { + // Reader is the EventStream reader for the SelectObjectContentEventStream + // events. This value is automatically set by the SDK when the API call is made + // Use this member when unit testing your code with the SDK to mock out the + // EventStream Reader. + // + // Must not be nil. + Reader SelectObjectContentEventStreamReader + + // StreamCloser is the io.Closer for the EventStream connection. For HTTP + // EventStream this is the response Body. The stream will be closed when + // the Close method of the EventStream is called. + StreamCloser io.Closer +} + +// Close closes the EventStream. This will also cause the Events channel to be +// closed. You can use the closing of the Events channel to terminate your +// application's read from the API's EventStream. 
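+//
+// A minimal consumption sketch (svc is an S3 client; construction of the
+// SelectObjectContentInput is elided):
+//
+//	out, err := svc.SelectObjectContent(input)
+//	if err != nil {
+//	    return err
+//	}
+//	es := out.EventStream
+//	defer es.Close()
+//	for ev := range es.Events() {
+//	    switch e := ev.(type) {
+//	    case *RecordsEvent:
+//	        os.Stdout.Write(e.Payload)
+//	    case *StatsEvent:
+//	        fmt.Println("bytes scanned:", aws.Int64Value(e.Details.BytesScanned))
+//	    }
+//	}
+//	if err := es.Err(); err != nil {
+//	    return err
+//	}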
+// +// Will close the underlying EventStream reader. For EventStream over HTTP +// connection this will also close the HTTP connection. +// +// Close must be called when done using the EventStream API. Not calling Close +// may result in resource leaks. +func (es *SelectObjectContentEventStream) Close() (err error) { + es.Reader.Close() + return es.Err() +} + +// Err returns any error that occurred while reading EventStream Events from +// the service API's response. Returns nil if there were no errors. +func (es *SelectObjectContentEventStream) Err() error { + if err := es.Reader.Err(); err != nil { + return err + } + es.StreamCloser.Close() + + return nil +} + +// Events returns a channel to read EventStream Events from the +// SelectObjectContent API. +// +// These events are: +// +// * ContinuationEvent +// * EndEvent +// * ProgressEvent +// * RecordsEvent +// * StatsEvent +func (es *SelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent { + return es.Reader.Events() +} + +// SelectObjectContentEventStreamEvent groups together all EventStream +// events read from the SelectObjectContent API. +// +// These events are: +// +// * ContinuationEvent +// * EndEvent +// * ProgressEvent +// * RecordsEvent +// * StatsEvent +type SelectObjectContentEventStreamEvent interface { + eventSelectObjectContentEventStream() +} + +// SelectObjectContentEventStreamReader provides the interface for reading EventStream +// Events from the SelectObjectContent API. The +// default implementation for this interface will be SelectObjectContentEventStream. +// +// The reader's Close method must allow multiple concurrent calls. +// +// These events are: +// +// * ContinuationEvent +// * EndEvent +// * ProgressEvent +// * RecordsEvent +// * StatsEvent +type SelectObjectContentEventStreamReader interface { + // Returns a channel of events as they are read from the event stream. + Events() <-chan SelectObjectContentEventStreamEvent + + // Close will close the underlying event stream reader. For event stream over + // HTTP this will also close the HTTP connection. + Close() error + + // Returns any error that has occured while reading from the event stream. + Err() error +} + +type readSelectObjectContentEventStream struct { + eventReader *eventstreamapi.EventReader + stream chan SelectObjectContentEventStreamEvent + errVal atomic.Value + + done chan struct{} + closeOnce sync.Once +} + +func newReadSelectObjectContentEventStream( + reader io.ReadCloser, + unmarshalers request.HandlerList, + logger aws.Logger, + logLevel aws.LogLevelType, +) *readSelectObjectContentEventStream { + r := &readSelectObjectContentEventStream{ + stream: make(chan SelectObjectContentEventStreamEvent), + done: make(chan struct{}), + } + + r.eventReader = eventstreamapi.NewEventReader( + reader, + protocol.HandlerPayloadUnmarshal{ + Unmarshalers: unmarshalers, + }, + r.unmarshalerForEventType, + ) + r.eventReader.UseLogger(logger, logLevel) + + return r +} + +// Close will close the underlying event stream reader. For EventStream over +// HTTP this will also close the HTTP connection. 
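+// Close is idempotent: closeOnce guarantees that the done channel is closed
+// and the underlying eventReader torn down at most once, so it is safe to
+// call from multiple goroutines.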
+func (r *readSelectObjectContentEventStream) Close() error { + r.closeOnce.Do(r.safeClose) + + return r.Err() +} + +func (r *readSelectObjectContentEventStream) safeClose() { + close(r.done) + err := r.eventReader.Close() + if err != nil { + r.errVal.Store(err) + } +} + +func (r *readSelectObjectContentEventStream) Err() error { + if v := r.errVal.Load(); v != nil { + return v.(error) + } + + return nil +} + +func (r *readSelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent { + return r.stream +} + +func (r *readSelectObjectContentEventStream) readEventStream() { + defer close(r.stream) + + for { + event, err := r.eventReader.ReadEvent() + if err != nil { + if err == io.EOF { + return + } + select { + case <-r.done: + // If closed already ignore the error + return + default: + } + r.errVal.Store(err) + return + } + + select { + case r.stream <- event.(SelectObjectContentEventStreamEvent): + case <-r.done: + return + } + } +} + +func (r *readSelectObjectContentEventStream) unmarshalerForEventType( + eventType string, +) (eventstreamapi.Unmarshaler, error) { + switch eventType { + case "Cont": + return &ContinuationEvent{}, nil + + case "End": + return &EndEvent{}, nil + + case "Progress": + return &ProgressEvent{}, nil + + case "Records": + return &RecordsEvent{}, nil + + case "Stats": + return &StatsEvent{}, nil + default: + return nil, awserr.New( + request.ErrCodeSerialization, + fmt.Sprintf("unknown event type name, %s, for SelectObjectContentEventStream", eventType), + nil, + ) + } +} + +// Request to filter the contents of an Amazon S3 object based on a simple Structured +// Query Language (SQL) statement. In the request, along with the SQL expression, +// you must also specify a data serialization format (JSON or CSV) of the object. +// Amazon S3 uses this to parse object data into records, and returns only records +// that match the specified SQL expression. You must also specify the data serialization +// format for the response. For more information, go to S3Select API Documentation +// (http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html). +type SelectObjectContentInput struct { + _ struct{} `locationName:"SelectObjectContentRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The S3 Bucket. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The expression that is used to query the object. + // + // Expression is a required field + Expression *string `type:"string" required:"true"` + + // The type of the provided expression (e.g., SQL). + // + // ExpressionType is a required field + ExpressionType *string `type:"string" required:"true" enum:"ExpressionType"` + + // Describes the format of the data in the object that is being queried. + // + // InputSerialization is a required field + InputSerialization *InputSerialization `type:"structure" required:"true"` + + // The Object Key. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Describes the format of the data that you want Amazon S3 to return in response. + // + // OutputSerialization is a required field + OutputSerialization *OutputSerialization `type:"structure" required:"true"` + + // Specifies if periodic request progress information should be enabled. + RequestProgress *RequestProgress `type:"structure"` + + // The SSE Algorithm used to encrypt the object. 
For more information, go to + // Server-Side Encryption (Using Customer-Provided Encryption Keys (http://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // The SSE Customer Key. For more information, go to Server-Side Encryption + // (Using Customer-Provided Encryption Keys (http://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + + // The SSE Customer Key MD5. For more information, go to Server-Side Encryption + // (Using Customer-Provided Encryption Keys (http://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` +} + +// String returns the string representation +func (s SelectObjectContentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SelectObjectContentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SelectObjectContentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SelectObjectContentInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Expression == nil { + invalidParams.Add(request.NewErrParamRequired("Expression")) + } + if s.ExpressionType == nil { + invalidParams.Add(request.NewErrParamRequired("ExpressionType")) + } + if s.InputSerialization == nil { + invalidParams.Add(request.NewErrParamRequired("InputSerialization")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.OutputSerialization == nil { + invalidParams.Add(request.NewErrParamRequired("OutputSerialization")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *SelectObjectContentInput) SetBucket(v string) *SelectObjectContentInput { + s.Bucket = &v + return s +} + +func (s *SelectObjectContentInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpression sets the Expression field's value. +func (s *SelectObjectContentInput) SetExpression(v string) *SelectObjectContentInput { + s.Expression = &v + return s +} + +// SetExpressionType sets the ExpressionType field's value. +func (s *SelectObjectContentInput) SetExpressionType(v string) *SelectObjectContentInput { + s.ExpressionType = &v + return s +} + +// SetInputSerialization sets the InputSerialization field's value. +func (s *SelectObjectContentInput) SetInputSerialization(v *InputSerialization) *SelectObjectContentInput { + s.InputSerialization = v + return s +} + +// SetKey sets the Key field's value. +func (s *SelectObjectContentInput) SetKey(v string) *SelectObjectContentInput { + s.Key = &v + return s +} + +// SetOutputSerialization sets the OutputSerialization field's value. +func (s *SelectObjectContentInput) SetOutputSerialization(v *OutputSerialization) *SelectObjectContentInput { + s.OutputSerialization = v + return s +} + +// SetRequestProgress sets the RequestProgress field's value. 
+func (s *SelectObjectContentInput) SetRequestProgress(v *RequestProgress) *SelectObjectContentInput { + s.RequestProgress = v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *SelectObjectContentInput) SetSSECustomerAlgorithm(v string) *SelectObjectContentInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *SelectObjectContentInput) SetSSECustomerKey(v string) *SelectObjectContentInput { + s.SSECustomerKey = &v + return s +} + +func (s *SelectObjectContentInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *SelectObjectContentInput) SetSSECustomerKeyMD5(v string) *SelectObjectContentInput { + s.SSECustomerKeyMD5 = &v + return s +} + +type SelectObjectContentOutput struct { + _ struct{} `type:"structure" payload:"Payload"` + + // Use EventStream to use the API's stream. + EventStream *SelectObjectContentEventStream `type:"structure"` +} + +// String returns the string representation +func (s SelectObjectContentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SelectObjectContentOutput) GoString() string { + return s.String() +} + +// SetEventStream sets the EventStream field's value. +func (s *SelectObjectContentOutput) SetEventStream(v *SelectObjectContentEventStream) *SelectObjectContentOutput { + s.EventStream = v + return s +} + +func (s *SelectObjectContentOutput) runEventStreamLoop(r *request.Request) { + if r.Error != nil { + return + } + reader := newReadSelectObjectContentEventStream( + r.HTTPResponse.Body, + r.Handlers.UnmarshalStream, + r.Config.Logger, + r.Config.LogLevel.Value(), + ) + go reader.readEventStream() + + eventStream := &SelectObjectContentEventStream{ + StreamCloser: r.HTTPResponse.Body, + Reader: reader, + } + s.EventStream = eventStream +} + // Describes the parameters for Select job types. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectParameters type SelectParameters struct { _ struct{} `type:"structure"` @@ -19612,7 +20212,6 @@ func (s *SelectParameters) SetOutputSerialization(v *OutputSerialization) *Selec // Describes the default server-side encryption to apply to new objects in the // bucket. If Put Object request does not specify any server-side encryption, // this default encryption will be applied. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ServerSideEncryptionByDefault type ServerSideEncryptionByDefault struct { _ struct{} `type:"structure"` @@ -19663,7 +20262,6 @@ func (s *ServerSideEncryptionByDefault) SetSSEAlgorithm(v string) *ServerSideEnc // Container for server-side encryption configuration rules. Currently S3 supports // one rule only. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ServerSideEncryptionConfiguration type ServerSideEncryptionConfiguration struct { _ struct{} `type:"structure"` @@ -19715,7 +20313,6 @@ func (s *ServerSideEncryptionConfiguration) SetRules(v []*ServerSideEncryptionRu // Container for information about a particular server-side encryption configuration // rule. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ServerSideEncryptionRule type ServerSideEncryptionRule struct { _ struct{} `type:"structure"` @@ -19757,7 +20354,6 @@ func (s *ServerSideEncryptionRule) SetApplyServerSideEncryptionByDefault(v *Serv } // Container for filters that define which source objects should be replicated. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SourceSelectionCriteria type SourceSelectionCriteria struct { _ struct{} `type:"structure"` @@ -19797,7 +20393,6 @@ func (s *SourceSelectionCriteria) SetSseKmsEncryptedObjects(v *SseKmsEncryptedOb } // Container for filter information of selection of KMS Encrypted S3 objects. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SseKmsEncryptedObjects type SseKmsEncryptedObjects struct { _ struct{} `type:"structure"` @@ -19837,7 +20432,87 @@ func (s *SseKmsEncryptedObjects) SetStatus(v string) *SseKmsEncryptedObjects { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/StorageClassAnalysis +type Stats struct { + _ struct{} `type:"structure"` + + // Total number of uncompressed object bytes processed. + BytesProcessed *int64 `type:"long"` + + // Total number of bytes of records payload data returned. + BytesReturned *int64 `type:"long"` + + // Total number of object bytes scanned. + BytesScanned *int64 `type:"long"` +} + +// String returns the string representation +func (s Stats) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Stats) GoString() string { + return s.String() +} + +// SetBytesProcessed sets the BytesProcessed field's value. +func (s *Stats) SetBytesProcessed(v int64) *Stats { + s.BytesProcessed = &v + return s +} + +// SetBytesReturned sets the BytesReturned field's value. +func (s *Stats) SetBytesReturned(v int64) *Stats { + s.BytesReturned = &v + return s +} + +// SetBytesScanned sets the BytesScanned field's value. +func (s *Stats) SetBytesScanned(v int64) *Stats { + s.BytesScanned = &v + return s +} + +type StatsEvent struct { + _ struct{} `locationName:"StatsEvent" type:"structure" payload:"Details"` + + // The Stats event details. + Details *Stats `locationName:"Details" type:"structure"` +} + +// String returns the string representation +func (s StatsEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StatsEvent) GoString() string { + return s.String() +} + +// SetDetails sets the Details field's value. +func (s *StatsEvent) SetDetails(v *Stats) *StatsEvent { + s.Details = v + return s +} + +// The StatsEvent is and event in the SelectObjectContentEventStream group of events. +func (s *StatsEvent) eventSelectObjectContentEventStream() {} + +// UnmarshalEvent unmarshals the EventStream Message into the StatsEvent value. +// This method is only used internally within the SDK's EventStream handling. 
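+//
+// The Stats payload is an XML document, so it is decoded through the payload
+// unmarshaler; compare RecordsEvent, whose payload is copied as raw bytes.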
+func (s *StatsEvent) UnmarshalEvent( + payloadUnmarshaler protocol.PayloadUnmarshaler, + msg eventstream.Message, +) error { + if err := payloadUnmarshaler.UnmarshalPayload( + bytes.NewReader(msg.Payload), s, + ); err != nil { + return err + } + return nil +} + type StorageClassAnalysis struct { _ struct{} `type:"structure"` @@ -19877,7 +20552,6 @@ func (s *StorageClassAnalysis) SetDataExport(v *StorageClassAnalysisDataExport) return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/StorageClassAnalysisDataExport type StorageClassAnalysisDataExport struct { _ struct{} `type:"structure"` @@ -19935,7 +20609,6 @@ func (s *StorageClassAnalysisDataExport) SetOutputSchemaVersion(v string) *Stora return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Tag type Tag struct { _ struct{} `type:"structure"` @@ -19991,7 +20664,6 @@ func (s *Tag) SetValue(v string) *Tag { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Tagging type Tagging struct { _ struct{} `type:"structure"` @@ -20038,7 +20710,6 @@ func (s *Tagging) SetTagSet(v []*Tag) *Tagging { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TargetGrant type TargetGrant struct { _ struct{} `type:"structure"` @@ -20087,7 +20758,6 @@ func (s *TargetGrant) SetPermission(v string) *TargetGrant { // Container for specifying the configuration when you want Amazon S3 to publish // events to an Amazon Simple Notification Service (Amazon SNS) topic. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TopicConfiguration type TopicConfiguration struct { _ struct{} `type:"structure"` @@ -20096,6 +20766,7 @@ type TopicConfiguration struct { // Container for object key name filtering rules. For information about key // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. Filter *NotificationConfigurationFilter `type:"structure"` // Optional unique identifier for configurations in a notification configuration. @@ -20159,7 +20830,6 @@ func (s *TopicConfiguration) SetTopicArn(v string) *TopicConfiguration { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TopicConfigurationDeprecated type TopicConfigurationDeprecated struct { _ struct{} `type:"structure"` @@ -20211,7 +20881,6 @@ func (s *TopicConfigurationDeprecated) SetTopic(v string) *TopicConfigurationDep return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Transition type Transition struct { _ struct{} `type:"structure"` @@ -20255,7 +20924,6 @@ func (s *Transition) SetStorageClass(v string) *Transition { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopyRequest type UploadPartCopyInput struct { _ struct{} `type:"structure"` @@ -20272,14 +20940,14 @@ type UploadPartCopyInput struct { CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"` // Copies the object if it has been modified since the specified time. - CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"` + CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp"` // Copies the object if its entity tag (ETag) is different than the specified // ETag. 
CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` // Copies the object if it hasn't been modified since the specified time. - CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"` + CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"` // The range of bytes to copy from the source object. The range value must use // the form bytes=first-last, where the first and last are the zero-based byte @@ -20499,7 +21167,6 @@ func (s *UploadPartCopyInput) SetUploadId(v string) *UploadPartCopyInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopyOutput type UploadPartCopyOutput struct { _ struct{} `type:"structure" payload:"CopyPartResult"` @@ -20584,7 +21251,6 @@ func (s *UploadPartCopyOutput) SetServerSideEncryption(v string) *UploadPartCopy return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartRequest type UploadPartInput struct { _ struct{} `type:"structure" payload:"Body"` @@ -20757,7 +21423,6 @@ func (s *UploadPartInput) SetUploadId(v string) *UploadPartInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartOutput type UploadPartOutput struct { _ struct{} `type:"structure"` @@ -20833,7 +21498,6 @@ func (s *UploadPartOutput) SetServerSideEncryption(v string) *UploadPartOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/VersioningConfiguration type VersioningConfiguration struct { _ struct{} `type:"structure"` @@ -20868,7 +21532,6 @@ func (s *VersioningConfiguration) SetStatus(v string) *VersioningConfiguration { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/WebsiteConfiguration type WebsiteConfiguration struct { _ struct{} `type:"structure"` @@ -21031,6 +21694,17 @@ const ( BucketVersioningStatusSuspended = "Suspended" ) +const ( + // CompressionTypeNone is a CompressionType enum value + CompressionTypeNone = "NONE" + + // CompressionTypeGzip is a CompressionType enum value + CompressionTypeGzip = "GZIP" + + // CompressionTypeBzip2 is a CompressionType enum value + CompressionTypeBzip2 = "BZIP2" +) + // Requests Amazon S3 to encode the object keys in the response and specifies // the encoding method to use. 
An object key may contain any Unicode character; // however, XML 1.0 parser cannot parse some characters, such as characters @@ -21151,6 +21825,14 @@ const ( InventoryOptionalFieldEncryptionStatus = "EncryptionStatus" ) +const ( + // JSONTypeDocument is a JSONType enum value + JSONTypeDocument = "DOCUMENT" + + // JSONTypeLines is a JSONType enum value + JSONTypeLines = "LINES" +) + const ( // MFADeleteEnabled is a MFADelete enum value MFADeleteEnabled = "Enabled" @@ -21207,6 +21889,12 @@ const ( // ObjectStorageClassGlacier is a ObjectStorageClass enum value ObjectStorageClassGlacier = "GLACIER" + + // ObjectStorageClassStandardIa is a ObjectStorageClass enum value + ObjectStorageClassStandardIa = "STANDARD_IA" + + // ObjectStorageClassOnezoneIa is a ObjectStorageClass enum value + ObjectStorageClassOnezoneIa = "ONEZONE_IA" ) const ( @@ -21328,6 +22016,9 @@ const ( // StorageClassStandardIa is a StorageClass enum value StorageClassStandardIa = "STANDARD_IA" + + // StorageClassOnezoneIa is a StorageClass enum value + StorageClassOnezoneIa = "ONEZONE_IA" ) const ( @@ -21360,6 +22051,9 @@ const ( // TransitionStorageClassStandardIa is a TransitionStorageClass enum value TransitionStorageClassStandardIa = "STANDARD_IA" + + // TransitionStorageClassOnezoneIa is a TransitionStorageClass enum value + TransitionStorageClassOnezoneIa = "ONEZONE_IA" ) const ( diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go b/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go new file mode 100644 index 00000000..5c8ce5cc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go @@ -0,0 +1,249 @@ +package s3 + +import ( + "bytes" + "crypto/md5" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "fmt" + "hash" + "io" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +const ( + contentMD5Header = "Content-Md5" + contentSha256Header = "X-Amz-Content-Sha256" + amzTeHeader = "X-Amz-Te" + amzTxEncodingHeader = "X-Amz-Transfer-Encoding" + + appendMD5TxEncoding = "append-md5" +) + +// contentMD5 computes and sets the HTTP Content-MD5 header for requests that +// require it. +func contentMD5(r *request.Request) { + h := md5.New() + + if !aws.IsReaderSeekable(r.Body) { + if r.Config.Logger != nil { + r.Config.Logger.Log(fmt.Sprintf( + "Unable to compute Content-MD5 for unseekable body, S3.%s", + r.Operation.Name)) + } + return + } + + if _, err := copySeekableBody(h, r.Body); err != nil { + r.Error = awserr.New("ContentMD5", "failed to compute body MD5", err) + return + } + + // encode the md5 checksum in base64 and set the request header. + v := base64.StdEncoding.EncodeToString(h.Sum(nil)) + r.HTTPRequest.Header.Set(contentMD5Header, v) +} + +// computeBodyHashes will add Content MD5 and Content Sha256 hashes to the +// request. If the body is not seekable or S3DisableContentMD5Validation set +// this handler will be ignored. +func computeBodyHashes(r *request.Request) { + if aws.BoolValue(r.Config.S3DisableContentMD5Validation) { + return + } + if r.IsPresigned() { + return + } + if r.Error != nil || !aws.IsReaderSeekable(r.Body) { + return + } + + var md5Hash, sha256Hash hash.Hash + hashers := make([]io.Writer, 0, 2) + + // Determine upfront which hashes can be set without overriding user + // provide header data. 
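+	// (Presigned requests and unseekable bodies were already filtered out by
+	// the early returns above, so hashing the body is always possible here.)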
+ if v := r.HTTPRequest.Header.Get(contentMD5Header); len(v) == 0 { + md5Hash = md5.New() + hashers = append(hashers, md5Hash) + } + + if v := r.HTTPRequest.Header.Get(contentSha256Header); len(v) == 0 { + sha256Hash = sha256.New() + hashers = append(hashers, sha256Hash) + } + + // Create the destination writer based on the hashes that are not already + // provided by the user. + var dst io.Writer + switch len(hashers) { + case 0: + return + case 1: + dst = hashers[0] + default: + dst = io.MultiWriter(hashers...) + } + + if _, err := copySeekableBody(dst, r.Body); err != nil { + r.Error = awserr.New("BodyHashError", "failed to compute body hashes", err) + return + } + + // For the hashes created, set the associated headers that the user did not + // already provide. + if md5Hash != nil { + sum := make([]byte, md5.Size) + encoded := make([]byte, md5Base64EncLen) + + base64.StdEncoding.Encode(encoded, md5Hash.Sum(sum[0:0])) + r.HTTPRequest.Header[contentMD5Header] = []string{string(encoded)} + } + + if sha256Hash != nil { + encoded := make([]byte, sha256HexEncLen) + sum := make([]byte, sha256.Size) + + hex.Encode(encoded, sha256Hash.Sum(sum[0:0])) + r.HTTPRequest.Header[contentSha256Header] = []string{string(encoded)} + } +} + +const ( + md5Base64EncLen = (md5.Size + 2) / 3 * 4 // base64.StdEncoding.EncodedLen + sha256HexEncLen = sha256.Size * 2 // hex.EncodedLen +) + +func copySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) { + curPos, err := src.Seek(0, sdkio.SeekCurrent) + if err != nil { + return 0, err + } + + // hash the body. seek back to the first position after reading to reset + // the body for transmission. copy errors may be assumed to be from the + // body. + n, err := io.Copy(dst, src) + if err != nil { + return n, err + } + + _, err = src.Seek(curPos, sdkio.SeekStart) + if err != nil { + return n, err + } + + return n, nil +} + +// Adds the x-amz-te: append_md5 header to the request. This requests the service +// responds with a trailing MD5 checksum. +// +// Will not ask for append MD5 if disabled, the request is presigned or, +// or the API operation does not support content MD5 validation. +func askForTxEncodingAppendMD5(r *request.Request) { + if aws.BoolValue(r.Config.S3DisableContentMD5Validation) { + return + } + if r.IsPresigned() { + return + } + r.HTTPRequest.Header.Set(amzTeHeader, appendMD5TxEncoding) +} + +func useMD5ValidationReader(r *request.Request) { + if r.Error != nil { + return + } + + if v := r.HTTPResponse.Header.Get(amzTxEncodingHeader); v != appendMD5TxEncoding { + return + } + + var bodyReader *io.ReadCloser + var contentLen int64 + switch tv := r.Data.(type) { + case *GetObjectOutput: + bodyReader = &tv.Body + contentLen = aws.Int64Value(tv.ContentLength) + // Update ContentLength hiden the trailing MD5 checksum. + tv.ContentLength = aws.Int64(contentLen - md5.Size) + tv.ContentRange = aws.String(r.HTTPResponse.Header.Get("X-Amz-Content-Range")) + default: + r.Error = awserr.New("ChecksumValidationError", + fmt.Sprintf("%s: %s header received on unsupported API, %s", + amzTxEncodingHeader, appendMD5TxEncoding, r.Operation.Name, + ), nil) + return + } + + if contentLen < md5.Size { + r.Error = awserr.New("ChecksumValidationError", + fmt.Sprintf("invalid Content-Length %d for %s %s", + contentLen, appendMD5TxEncoding, amzTxEncodingHeader, + ), nil) + return + } + + // Wrap and swap the response body reader with the validation reader. 
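+	// The validation reader tees the first contentLen-md5.Size payload bytes
+	// through an MD5 hash; on EOF it reads the trailing 16-byte digest from
+	// the raw body and fails the final Read with InvalidChecksum if the sums
+	// differ.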
+ *bodyReader = newMD5ValidationReader(*bodyReader, contentLen-md5.Size) +} + +type md5ValidationReader struct { + rawReader io.ReadCloser + payload io.Reader + hash hash.Hash + + payloadLen int64 + read int64 +} + +func newMD5ValidationReader(reader io.ReadCloser, payloadLen int64) *md5ValidationReader { + h := md5.New() + return &md5ValidationReader{ + rawReader: reader, + payload: io.TeeReader(&io.LimitedReader{R: reader, N: payloadLen}, h), + hash: h, + payloadLen: payloadLen, + } +} + +func (v *md5ValidationReader) Read(p []byte) (n int, err error) { + n, err = v.payload.Read(p) + if err != nil && err != io.EOF { + return n, err + } + + v.read += int64(n) + + if err == io.EOF { + if v.read != v.payloadLen { + return n, io.ErrUnexpectedEOF + } + expectSum := make([]byte, md5.Size) + actualSum := make([]byte, md5.Size) + if _, sumReadErr := io.ReadFull(v.rawReader, expectSum); sumReadErr != nil { + return n, sumReadErr + } + actualSum = v.hash.Sum(actualSum[0:0]) + if !bytes.Equal(expectSum, actualSum) { + return n, awserr.New("InvalidChecksum", + fmt.Sprintf("expected MD5 checksum %s, got %s", + hex.EncodeToString(expectSum), + hex.EncodeToString(actualSum), + ), + nil) + } + } + + return n, err +} + +func (v *md5ValidationReader) Close() error { + return v.rawReader.Close() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go b/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go deleted file mode 100644 index 9fc5df94..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go +++ /dev/null @@ -1,36 +0,0 @@ -package s3 - -import ( - "crypto/md5" - "encoding/base64" - "io" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -// contentMD5 computes and sets the HTTP Content-MD5 header for requests that -// require it. -func contentMD5(r *request.Request) { - h := md5.New() - - // hash the body. seek back to the first position after reading to reset - // the body for transmission. copy errors may be assumed to be from the - // body. - _, err := io.Copy(h, r.Body) - if err != nil { - r.Error = awserr.New("ContentMD5", "failed to read body", err) - return - } - _, err = r.Body.Seek(0, 0) - if err != nil { - r.Error = awserr.New("ContentMD5", "failed to seek body", err) - return - } - - // encode the md5 checksum in base64 and set the request header. - sum := h.Sum(nil) - sum64 := make([]byte, base64.StdEncoding.EncodedLen(len(sum))) - base64.StdEncoding.Encode(sum64, sum) - r.HTTPRequest.Header.Set("Content-MD5", string(sum64)) -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go index 899d5e8d..a55beab9 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go @@ -42,6 +42,12 @@ func defaultInitRequestFn(r *request.Request) { r.Handlers.Validate.PushFront(populateLocationConstraint) case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload: r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError) + case opPutObject, opUploadPart: + r.Handlers.Build.PushBack(computeBodyHashes) + // Disabled until #1837 root issue is resolved. 
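+	// When re-enabled, askForTxEncodingAppendMD5 will add the
+	// X-Amz-Te: append-md5 request header, and useMD5ValidationReader will
+	// wrap the response body so the trailing checksum is verified as the
+	// caller reads it.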
+ // case opGetObject: + // r.Handlers.Build.PushBack(askForTxEncodingAppendMD5) + // r.Handlers.Unmarshal.PushBack(useMD5ValidationReader) } } diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go index 614e477d..20de53f2 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go @@ -29,8 +29,9 @@ var initRequest func(*request.Request) // Service information constants const ( - ServiceName = "s3" // Service endpoint prefix API calls made to. - EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata. + ServiceName = "s3" // Name of service. + EndpointsID = ServiceName // ID to lookup a service endpoint with. + ServiceID = "S3" // ServiceID is a unique identifer of a specific service. ) // New creates a new instance of the S3 client with a session. @@ -55,6 +56,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio cfg, metadata.ClientInfo{ ServiceName: ServiceName, + ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, Endpoint: endpoint, @@ -71,6 +73,8 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + svc.Handlers.UnmarshalStream.PushBackNamed(restxml.UnmarshalHandler) + // Run custom client initialization if present if initClient != nil { initClient(svc.Client) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go index 5a78fd33..9f33efc6 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go @@ -7,6 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkio" ) func copyMultipartStatusOKUnmarhsalError(r *request.Request) { @@ -17,7 +18,7 @@ func copyMultipartStatusOKUnmarhsalError(r *request.Request) { } body := bytes.NewReader(b) r.HTTPResponse.Body = ioutil.NopCloser(body) - defer body.Seek(0, 0) + defer body.Seek(0, sdkio.SeekStart) if body.Len() == 0 { // If there is no body don't attempt to parse the body. diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go index 3b8be437..6f89a796 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -14,7 +14,7 @@ const opAssumeRole = "AssumeRole" // AssumeRoleRequest generates a "aws/request.Request" representing the // client's request for the AssumeRole operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
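The doc-comment updates in this and the following hunks describe the new DurationSeconds behavior for the AssumeRole* operations. As a minimal sketch of the call pattern those comments describe (the role ARN and session name below are placeholder values, and the duration must not exceed the role's maximum session duration setting):

```
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession()))

	out, err := svc.AssumeRole(&sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example"), // placeholder
		RoleSessionName: aws.String("example-session"),                        // placeholder
		DurationSeconds: aws.Int64(3600),
	})
	if err != nil {
		fmt.Println("AssumeRole failed:", err)
		return
	}
	fmt.Println("temporary credentials expire at:", aws.TimeValue(out.Credentials.Expiration))
}
```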
@@ -35,7 +35,7 @@ const opAssumeRole = "AssumeRole" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) { op := &request.Operation{ Name: opAssumeRole, @@ -88,9 +88,18 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // Scenarios for Temporary Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html#sts-introduction) // in the IAM User Guide. // -// The temporary security credentials are valid for the duration that you specified -// when calling AssumeRole, which can be from 900 seconds (15 minutes) to a -// maximum of 3600 seconds (1 hour). The default is 1 hour. +// By default, the temporary security credentials created by AssumeRole last +// for one hour. However, you can use the optional DurationSeconds parameter +// to specify the duration of your session. You can provide a value from 900 +// seconds (15 minutes) up to the maximum session duration setting for the role. +// This setting can have a value from 1 hour to 12 hours. To learn how to view +// the maximum value for your role, see View the Maximum Session Duration Setting +// for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI operations but +// does not apply when you use those operations to create a console URL. For +// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. // // The temporary security credentials created by AssumeRole can be used to make // API calls to any AWS service with the following exception: you cannot call @@ -121,7 +130,12 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // the user to call AssumeRole on the ARN of the role in the other account. // If the user is in the same account as the role, then you can either attach // a policy to the user (identical to the previous different account user), -// or you can add the user as a principal directly in the role's trust policy +// or you can add the user as a principal directly in the role's trust policy. +// In this case, the trust policy acts as the only resource-based policy in +// IAM, and users in the same account as the role do not need explicit permission +// to assume the role. For more information about trust policies and resource-based +// policies, see IAM Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) +// in the IAM User Guide. // // Using MFA with AssumeRole // @@ -168,7 +182,7 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. 
// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) { req, out := c.AssumeRoleRequest(input) return out, req.Send() @@ -194,7 +208,7 @@ const opAssumeRoleWithSAML = "AssumeRoleWithSAML" // AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the // client's request for the AssumeRoleWithSAML operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -215,7 +229,7 @@ const opAssumeRoleWithSAML = "AssumeRoleWithSAML" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) { op := &request.Operation{ Name: opAssumeRoleWithSAML, @@ -247,11 +261,20 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // an access key ID, a secret access key, and a security token. Applications // can use these temporary security credentials to sign calls to AWS services. // -// The temporary security credentials are valid for the duration that you specified -// when calling AssumeRole, or until the time specified in the SAML authentication -// response's SessionNotOnOrAfter value, whichever is shorter. The duration -// can be from 900 seconds (15 minutes) to a maximum of 3600 seconds (1 hour). -// The default is 1 hour. +// By default, the temporary security credentials created by AssumeRoleWithSAML +// last for one hour. However, you can use the optional DurationSeconds parameter +// to specify the duration of your session. Your role session lasts for the +// duration that you specify, or until the time specified in the SAML authentication +// response's SessionNotOnOrAfter value, whichever is shorter. You can provide +// a DurationSeconds value from 900 seconds (15 minutes) up to the maximum session +// duration setting for the role. This setting can have a value from 1 hour +// to 12 hours. To learn how to view the maximum value for your role, see View +// the Maximum Session Duration Setting for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI operations but +// does not apply when you use those operations to create a console URL. For +// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. // // The temporary security credentials created by AssumeRoleWithSAML can be used // to make API calls to any AWS service with the following exception: you cannot @@ -341,7 +364,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. 
// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) { req, out := c.AssumeRoleWithSAMLRequest(input) return out, req.Send() @@ -367,7 +390,7 @@ const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" // AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the // client's request for the AssumeRoleWithWebIdentity operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -388,7 +411,7 @@ const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) { op := &request.Operation{ Name: opAssumeRoleWithWebIdentity, @@ -438,9 +461,18 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // key ID, a secret access key, and a security token. Applications can use these // temporary security credentials to sign calls to AWS service APIs. // -// The credentials are valid for the duration that you specified when calling -// AssumeRoleWithWebIdentity, which can be from 900 seconds (15 minutes) to -// a maximum of 3600 seconds (1 hour). The default is 1 hour. +// By default, the temporary security credentials created by AssumeRoleWithWebIdentity +// last for one hour. However, you can use the optional DurationSeconds parameter +// to specify the duration of your session. You can provide a value from 900 +// seconds (15 minutes) up to the maximum session duration setting for the role. +// This setting can have a value from 1 hour to 12 hours. To learn how to view +// the maximum value for your role, see View the Maximum Session Duration Setting +// for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI operations but +// does not apply when you use those operations to create a console URL. For +// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. // // The temporary security credentials created by AssumeRoleWithWebIdentity can // be used to make API calls to any AWS service with the following exception: @@ -492,7 +524,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // the information from these providers to get and use temporary security // credentials. // -// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/4617974389850313). +// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications). 
// This article discusses web identity federation and shows an example of // how to use web identity federation to get access to content in Amazon // S3. @@ -543,7 +575,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) { req, out := c.AssumeRoleWithWebIdentityRequest(input) return out, req.Send() @@ -569,7 +601,7 @@ const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage" // DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the // client's request for the DecodeAuthorizationMessage operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -590,7 +622,7 @@ const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) { op := &request.Operation{ Name: opDecodeAuthorizationMessage, @@ -655,7 +687,7 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag // invalid. This can happen if the token contains invalid characters, such as // linebreaks. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) { req, out := c.DecodeAuthorizationMessageRequest(input) return out, req.Send() @@ -681,7 +713,7 @@ const opGetCallerIdentity = "GetCallerIdentity" // GetCallerIdentityRequest generates a "aws/request.Request" representing the // client's request for the GetCallerIdentity operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
@@ -702,7 +734,7 @@ const opGetCallerIdentity = "GetCallerIdentity" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) { op := &request.Operation{ Name: opGetCallerIdentity, @@ -730,7 +762,7 @@ func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *requ // // See the AWS API reference guide for AWS Security Token Service's // API operation GetCallerIdentity for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) { req, out := c.GetCallerIdentityRequest(input) return out, req.Send() @@ -756,7 +788,7 @@ const opGetFederationToken = "GetFederationToken" // GetFederationTokenRequest generates a "aws/request.Request" representing the // client's request for the GetFederationToken operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. @@ -777,7 +809,7 @@ const opGetFederationToken = "GetFederationToken" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) { op := &request.Operation{ Name: opGetFederationToken, @@ -899,7 +931,7 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) { req, out := c.GetFederationTokenRequest(input) return out, req.Send() @@ -925,7 +957,7 @@ const opGetSessionToken = "GetSessionToken" // GetSessionTokenRequest generates a "aws/request.Request" representing the // client's request for the GetSessionToken operation. The "output" return -// value will be populated with the request's response once the request complets +// value will be populated with the request's response once the request completes // successfuly. // // Use "Send" method on the returned Request to send the API call to the service. 
@@ -946,7 +978,7 @@ const opGetSessionToken = "GetSessionToken" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) { op := &request.Operation{ Name: opGetSessionToken, @@ -1027,7 +1059,7 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request. // and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) { req, out := c.GetSessionTokenRequest(input) return out, req.Send() @@ -1049,20 +1081,27 @@ func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionToken return out, req.Send() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleRequest type AssumeRoleInput struct { _ struct{} `type:"structure"` // The duration, in seconds, of the role session. The value can range from 900 - // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set - // to 3600 seconds. + // seconds (15 minutes) up to the maximum session duration setting for the role. + // This setting can have a value from 1 hour to 12 hours. If you specify a value + // higher than this setting, the operation fails. For example, if you specify + // a session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. To learn how to view the maximum + // value for your role, see View the Maximum Session Duration Setting for a + // Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. // - // This is separate from the duration of a console session that you might request - // using the returned credentials. The request to the federation endpoint for - // a console sign-in token takes a SessionDuration parameter that specifies - // the maximum length of the console session, separately from the DurationSeconds - // parameter on this API. For more information, see Creating a URL that Enables - // Federated Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) // in the IAM User Guide. 
DurationSeconds *int64 `min:"900" type:"integer"` @@ -1241,7 +1280,6 @@ func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput { // Contains the response to a successful AssumeRole request, including temporary // AWS credentials that can be used to make AWS requests. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleResponse type AssumeRoleOutput struct { _ struct{} `type:"structure"` @@ -1295,22 +1333,30 @@ func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAMLRequest type AssumeRoleWithSAMLInput struct { _ struct{} `type:"structure"` - // The duration, in seconds, of the role session. The value can range from 900 - // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set - // to 3600 seconds. An expiration can also be specified in the SAML authentication - // response's SessionNotOnOrAfter value. The actual expiration time is whichever - // value is shorter. + // The duration, in seconds, of the role session. Your role session lasts for + // the duration that you specify for the DurationSeconds parameter, or until + // the time specified in the SAML authentication response's SessionNotOnOrAfter + // value, whichever is shorter. You can provide a DurationSeconds value from + // 900 seconds (15 minutes) up to the maximum session duration setting for the + // role. This setting can have a value from 1 hour to 12 hours. If you specify + // a value higher than this setting, the operation fails. For example, if you + // specify a session duration of 12 hours, but your administrator set the maximum + // session duration to 6 hours, your operation fails. To learn how to view the + // maximum value for your role, see View the Maximum Session Duration Setting + // for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. // - // This is separate from the duration of a console session that you might request - // using the returned credentials. The request to the federation endpoint for - // a console sign-in token takes a SessionDuration parameter that specifies - // the maximum length of the console session, separately from the DurationSeconds - // parameter on this API. For more information, see Enabling SAML 2.0 Federated - // Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-saml.html) + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) // in the IAM User Guide. DurationSeconds *int64 `min:"900" type:"integer"` @@ -1436,7 +1482,6 @@ func (s *AssumeRoleWithSAMLInput) SetSAMLAssertion(v string) *AssumeRoleWithSAML // Contains the response to a successful AssumeRoleWithSAML request, including // temporary AWS credentials that can be used to make AWS requests. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAMLResponse type AssumeRoleWithSAMLOutput struct { _ struct{} `type:"structure"` @@ -1548,20 +1593,27 @@ func (s *AssumeRoleWithSAMLOutput) SetSubjectType(v string) *AssumeRoleWithSAMLO return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentityRequest type AssumeRoleWithWebIdentityInput struct { _ struct{} `type:"structure"` // The duration, in seconds, of the role session. The value can range from 900 - // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set - // to 3600 seconds. + // seconds (15 minutes) up to the maximum session duration setting for the role. + // This setting can have a value from 1 hour to 12 hours. If you specify a value + // higher than this setting, the operation fails. For example, if you specify + // a session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. To learn how to view the maximum + // value for your role, see View the Maximum Session Duration Setting for a + // Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. // - // This is separate from the duration of a console session that you might request - // using the returned credentials. The request to the federation endpoint for - // a console sign-in token takes a SessionDuration parameter that specifies - // the maximum length of the console session, separately from the DurationSeconds - // parameter on this API. For more information, see Creating a URL that Enables - // Federated Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) // in the IAM User Guide. DurationSeconds *int64 `min:"900" type:"integer"` @@ -1711,7 +1763,6 @@ func (s *AssumeRoleWithWebIdentityInput) SetWebIdentityToken(v string) *AssumeRo // Contains the response to a successful AssumeRoleWithWebIdentity request, // including temporary AWS credentials that can be used to make AWS requests. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentityResponse type AssumeRoleWithWebIdentityOutput struct { _ struct{} `type:"structure"` @@ -1804,7 +1855,6 @@ func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v strin // The identifiers for the temporary security credentials that the operation // returns. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumedRoleUser type AssumedRoleUser struct { _ struct{} `type:"structure"` @@ -1847,7 +1897,6 @@ func (s *AssumedRoleUser) SetAssumedRoleId(v string) *AssumedRoleUser { } // AWS credentials for API authentication. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/Credentials type Credentials struct { _ struct{} `type:"structure"` @@ -1859,7 +1908,7 @@ type Credentials struct { // The date on which the current credentials expire. // // Expiration is a required field - Expiration *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + Expiration *time.Time `type:"timestamp" required:"true"` // The secret access key that can be used to sign requests. // @@ -1906,7 +1955,6 @@ func (s *Credentials) SetSessionToken(v string) *Credentials { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessageRequest type DecodeAuthorizationMessageInput struct { _ struct{} `type:"structure"` @@ -1951,7 +1999,6 @@ func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAut // A document that contains additional information about the authorization status // of a request from an encoded message that is returned in response to an AWS // request. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessageResponse type DecodeAuthorizationMessageOutput struct { _ struct{} `type:"structure"` @@ -1976,7 +2023,6 @@ func (s *DecodeAuthorizationMessageOutput) SetDecodedMessage(v string) *DecodeAu } // Identifiers for the federated user that is associated with the credentials. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/FederatedUser type FederatedUser struct { _ struct{} `type:"structure"` @@ -2017,7 +2063,6 @@ func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentityRequest type GetCallerIdentityInput struct { _ struct{} `type:"structure"` } @@ -2034,7 +2079,6 @@ func (s GetCallerIdentityInput) GoString() string { // Contains the response to a successful GetCallerIdentity request, including // information about the entity making the request. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentityResponse type GetCallerIdentityOutput struct { _ struct{} `type:"structure"` @@ -2080,7 +2124,6 @@ func (s *GetCallerIdentityOutput) SetUserId(v string) *GetCallerIdentityOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationTokenRequest type GetFederationTokenInput struct { _ struct{} `type:"structure"` @@ -2189,7 +2232,6 @@ func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput { // Contains the response to a successful GetFederationToken request, including // temporary AWS credentials that can be used to make AWS requests. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationTokenResponse type GetFederationTokenOutput struct { _ struct{} `type:"structure"` @@ -2242,7 +2284,6 @@ func (s *GetFederationTokenOutput) SetPackedPolicySize(v int64) *GetFederationTo return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionTokenRequest type GetSessionTokenInput struct { _ struct{} `type:"structure"` @@ -2327,7 +2368,6 @@ func (s *GetSessionTokenInput) SetTokenCode(v string) *GetSessionTokenInput { // Contains the response to a successful GetSessionToken request, including // temporary AWS credentials that can be used to make AWS requests. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionTokenResponse type GetSessionTokenOutput struct { _ struct{} `type:"structure"` diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go index 1ee5839e..185c914d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go @@ -29,8 +29,9 @@ var initRequest func(*request.Request) // Service information constants const ( - ServiceName = "sts" // Service endpoint prefix API calls made to. - EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata. + ServiceName = "sts" // Name of service. + EndpointsID = ServiceName // ID to lookup a service endpoint with. + ServiceID = "STS" // ServiceID is a unique identifer of a specific service. ) // New creates a new instance of the STS client with a session. @@ -55,6 +56,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio cfg, metadata.ClientInfo{ ServiceName: ServiceName, + ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, Endpoint: endpoint, From de21eb6c967f66fa329b2b82f783cd575a341c97 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Tue, 14 Aug 2018 14:23:44 -0700 Subject: [PATCH 138/276] Revert "Fixed customer facing grammar and spelling" This reverts commit a64394ece5f69596602bcdf12ad578caf5ea89b8. Signed-off-by: Derek McGowan --- vendor/gopkg.in/yaml.v2/scannerc.go | 36 ++++++++++++++--------------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go index 0c86a82f..fe93b190 100644 --- a/vendor/gopkg.in/yaml.v2/scannerc.go +++ b/vendor/gopkg.in/yaml.v2/scannerc.go @@ -961,15 +961,15 @@ func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml } // Pop indentation levels from the indents stack until the current level -// becomes less or equal to the column. For each indentation level, append +// becomes less or equal to the column. For each intendation level, append // the BLOCK-END token. func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { // In the flow context, do nothing. - if parser.flow_level > 0 { + if parser.flow_level > 0 { return true } - // Loop through the indentation levels in the stack. + // Loop through the intendation levels in the stack. for parser.indent > column { // Create a token and append it to the queue. token := yaml_token_t{ @@ -2085,14 +2085,14 @@ func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, l return false } if is_digit(parser.buffer, parser.buffer_pos) { - // Check that the indentation is greater than 0. + // Check that the intendation is greater than 0. if parser.buffer[parser.buffer_pos] == '0' { yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an indentation indicator equal to 0") + start_mark, "found an intendation indicator equal to 0") return false } - // Get the indentation level and eat the indicator. + // Get the intendation level and eat the indicator. 
increment = as_digit(parser.buffer, parser.buffer_pos) skip(parser) } @@ -2102,7 +2102,7 @@ func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, l if parser.buffer[parser.buffer_pos] == '0' { yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an indentation indicator equal to 0") + start_mark, "found an intendation indicator equal to 0") return false } increment = as_digit(parser.buffer, parser.buffer_pos) @@ -2157,7 +2157,7 @@ func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, l end_mark := parser.mark - // Set the indentation level if it was specified. + // Set the intendation level if it was specified. var indent int if increment > 0 { if parser.indent >= 0 { @@ -2217,7 +2217,7 @@ func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, l leading_break = read_line(parser, leading_break) - // Eat the following indentation spaces and line breaks. + // Eat the following intendation spaces and line breaks. if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { return false } @@ -2245,15 +2245,15 @@ func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, l return true } -// Scan indentation spaces and line breaks for a block scalar. Determine the -// indentation level if needed. +// Scan intendation spaces and line breaks for a block scalar. Determine the +// intendation level if needed. func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { *end_mark = parser.mark - // Eat the indentation spaces and line breaks. + // Eat the intendation spaces and line breaks. max_indent := 0 for { - // Eat the indentation spaces. + // Eat the intendation spaces. if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } @@ -2267,10 +2267,10 @@ func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, br max_indent = parser.mark.column } - // Check for a tab character messing the indentation. + // Check for a tab character messing the intendation. if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found a tab character where an indentation space is expected") + start_mark, "found a tab character where an intendation space is expected") } // Have we found a non-empty line? @@ -2655,10 +2655,10 @@ func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) b for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { if is_blank(parser.buffer, parser.buffer_pos) { - // Check for tab character that abuse indentation. + // Check for tab character that abuse intendation. if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", - start_mark, "found a tab character that violates indatation") + start_mark, "found a tab character that violate intendation") return false } @@ -2687,7 +2687,7 @@ func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) b } } - // Check indentation level. + // Check intendation level. 
 		if parser.flow_level == 0 && parser.mark.column < indent {
 			break
 		}
 	}

From 13f8189f2a6095991c660cb319b59eba10a69c09 Mon Sep 17 00:00:00 2001
From: Manish Tomar
Date: Tue, 14 Aug 2018 13:10:56 -0700
Subject: [PATCH 139/276] fix memory leak introduced in PR #2648

context.App.repoRemover is a single registry instance that lives for
the duration of the app. When processing each request it was wrapped
in another remover, and the remover being wrapped happened to be the
one created for the previous request. As a result, every remover ever
created was retained in an ever-growing linked list, causing a memory
leak. Fix it by storing the wrapped remover inside the request
context, which is garbage collected when the request context is.
This was introduced in PR #2648.

Signed-off-by: Manish Tomar
---
 registry/handlers/app.go     | 2 +-
 registry/handlers/context.go | 3 +++
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/registry/handlers/app.go b/registry/handlers/app.go
index 4cc47371..35b5504f 100644
--- a/registry/handlers/app.go
+++ b/registry/handlers/app.go
@@ -702,7 +702,7 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler {
 		}
 
 		// assign and decorate the authorized repository with an event bridge.
-		context.Repository, context.App.repoRemover = notifications.Listen(
+		context.Repository, context.RepositoryRemover = notifications.Listen(
 			repository,
 			context.App.repoRemover,
 			app.eventBridge(context, r))
diff --git a/registry/handlers/context.go b/registry/handlers/context.go
index e5d26768..7a67b604 100644
--- a/registry/handlers/context.go
+++ b/registry/handlers/context.go
@@ -25,6 +25,9 @@ type Context struct {
 	// should be scoped to a single repository. This field may be nil.
 	Repository distribution.Repository
 
+	// RepositoryRemover provides method to delete a repository
+	RepositoryRemover distribution.RepositoryRemover
+
 	// Errors is a collection of errors encountered during the request to be
 	// returned to the client API. If errors are added to the collection, the
 	// handler *must not* start the response via http.ResponseWriter.

From f0ee5720a51982529f99444db659ea91c68b49ab Mon Sep 17 00:00:00 2001
From: Derek McGowan
Date: Tue, 14 Aug 2018 15:23:04 -0700
Subject: [PATCH 140/276] Update yaml parser

Mark the top-level Loglevel field as deprecated

Signed-off-by: Derek McGowan
---
 configuration/configuration.go      |  18 +-
 configuration/configuration_test.go |  14 +-
 registry/registry.go                |   7 -
 vendor.conf                         |   2 +-
 vendor/gopkg.in/yaml.v2/LICENSE     | 325 +++++++++++++++-------------
 vendor/gopkg.in/yaml.v2/NOTICE      |  13 ++
 vendor/gopkg.in/yaml.v2/README.md   |   9 +-
 vendor/gopkg.in/yaml.v2/apic.go     |  55 +++--
 vendor/gopkg.in/yaml.v2/decode.go   | 264 +++++++++++++++-------
 vendor/gopkg.in/yaml.v2/emitterc.go |  22 +-
 vendor/gopkg.in/yaml.v2/encode.go   | 153 +++++++++----
 vendor/gopkg.in/yaml.v2/parserc.go  |   1 -
 vendor/gopkg.in/yaml.v2/readerc.go  |  27 ++-
 vendor/gopkg.in/yaml.v2/resolve.go  |  91 ++++++--
 vendor/gopkg.in/yaml.v2/scannerc.go |  72 +++---
 vendor/gopkg.in/yaml.v2/sorter.go   |   9 +
 vendor/gopkg.in/yaml.v2/writerc.go  |  65 +-----
 vendor/gopkg.in/yaml.v2/yaml.go     | 168 ++++++++++++--
 vendor/gopkg.in/yaml.v2/yamlh.go    |  32 ++-
 19 files changed, 855 insertions(+), 492 deletions(-)
 create mode 100644 vendor/gopkg.in/yaml.v2/NOTICE

diff --git a/configuration/configuration.go b/configuration/configuration.go
index 621d29ae..f4ba9c7f 100644
--- a/configuration/configuration.go
+++ b/configuration/configuration.go
@@ -30,7 +30,7 @@ type Configuration struct {
 		} `yaml:"accesslog,omitempty"`
 
 		// Level is the granularity at which registry operations are logged.
- Level Loglevel `yaml:"level"` + Level Loglevel `yaml:"level,omitempty"` // Formatter overrides the default formatter with another. Options // include "text", "json" and "logstash". @@ -45,8 +45,9 @@ type Configuration struct { Hooks []LogHook `yaml:"hooks,omitempty"` } - // Loglevel is the level at which registry operations are logged. This is - // deprecated. Please use Log.Level in the future. + // Loglevel is the level at which registry operations are logged. + // + // Deprecated: Use Log.Level instead. Loglevel Loglevel `yaml:"loglevel,omitempty"` // Storage is the configuration for the registry's storage driver @@ -640,8 +641,15 @@ func Parse(rd io.Reader) (*Configuration, error) { ParseAs: reflect.TypeOf(v0_1Configuration{}), ConversionFunc: func(c interface{}) (interface{}, error) { if v0_1, ok := c.(*v0_1Configuration); ok { - if v0_1.Loglevel == Loglevel("") { - v0_1.Loglevel = Loglevel("info") + if v0_1.Log.Level == Loglevel("") { + if v0_1.Loglevel != Loglevel("") { + v0_1.Log.Level = v0_1.Loglevel + } else { + v0_1.Log.Level = Loglevel("info") + } + } + if v0_1.Loglevel != Loglevel("") { + v0_1.Loglevel = Loglevel("") } if v0_1.Storage.Type() == "" { return nil, errors.New("No storage configuration provided") diff --git a/configuration/configuration_test.go b/configuration/configuration_test.go index e74b92c4..1ae4ce9b 100644 --- a/configuration/configuration_test.go +++ b/configuration/configuration_test.go @@ -22,14 +22,14 @@ var configStruct = Configuration{ AccessLog struct { Disabled bool `yaml:"disabled,omitempty"` } `yaml:"accesslog,omitempty"` - Level Loglevel `yaml:"level"` + Level Loglevel `yaml:"level,omitempty"` Formatter string `yaml:"formatter,omitempty"` Fields map[string]interface{} `yaml:"fields,omitempty"` Hooks []LogHook `yaml:"hooks,omitempty"` }{ + Level: "info", Fields: map[string]interface{}{"environment": "test"}, }, - Loglevel: "info", Storage: Storage{ "s3": Parameters{ "region": "us-east-1", @@ -126,9 +126,9 @@ var configStruct = Configuration{ var configYamlV0_1 = ` version: 0.1 log: + level: info fields: environment: test -loglevel: info storage: s3: region: us-east-1 @@ -171,7 +171,8 @@ http: // storage driver with no parameters var inmemoryConfigYamlV0_1 = ` version: 0.1 -loglevel: info +log: + level: info storage: inmemory auth: silly: @@ -212,6 +213,7 @@ func (suite *ConfigSuite) TestMarshalRoundtrip(c *C) { configBytes, err := yaml.Marshal(suite.expectedConfig) c.Assert(err, IsNil) config, err := Parse(bytes.NewReader(configBytes)) + c.Log(string(configBytes)) c.Assert(err, IsNil) c.Assert(config, DeepEquals, suite.expectedConfig) } @@ -334,9 +336,9 @@ func (suite *ConfigSuite) TestParseWithSameEnvLoglevel(c *C) { // TestParseWithDifferentEnvLoglevel validates that providing an environment variable defining the // log level will override the value provided in the yaml document func (suite *ConfigSuite) TestParseWithDifferentEnvLoglevel(c *C) { - suite.expectedConfig.Loglevel = "error" + suite.expectedConfig.Log.Level = "error" - os.Setenv("REGISTRY_LOGLEVEL", "error") + os.Setenv("REGISTRY_LOG_LEVEL", "error") config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) c.Assert(err, IsNil) diff --git a/registry/registry.go b/registry/registry.go index 0737f7d0..f0708bcf 100644 --- a/registry/registry.go +++ b/registry/registry.go @@ -237,13 +237,6 @@ func configureReporting(app *handlers.App) http.Handler { // configureLogging prepares the context with a logger using the // configuration. 
func configureLogging(ctx context.Context, config *configuration.Configuration) (context.Context, error) { - if config.Log.Level == "" && config.Log.Formatter == "" { - // If no config for logging is set, fallback to deprecated "Loglevel". - log.SetLevel(logLevel(config.Loglevel)) - ctx = dcontext.WithLogger(ctx, dcontext.GetLogger(ctx)) - return ctx, nil - } - log.SetLevel(logLevel(config.Log.Level)) formatter := config.Log.Formatter diff --git a/vendor.conf b/vendor.conf index d0efe39d..972b915f 100644 --- a/vendor.conf +++ b/vendor.conf @@ -45,7 +45,7 @@ google.golang.org/cloud 975617b05ea8a58727e6c1a06b6161ff4185a9f2 google.golang.org/grpc d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994 gopkg.in/check.v1 64131543e7896d5bcc6bd5a76287eb75ea96c673 gopkg.in/square/go-jose.v1 40d457b439244b546f023d056628e5184136899b -gopkg.in/yaml.v2 bef53efd0c76e49e6de55ead051f886bea7e9420 +gopkg.in/yaml.v2 v2.2.1 rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb github.com/opencontainers/image-spec ab7389ef9f50030c9b245bc16b981c7ddf192882 diff --git a/vendor/gopkg.in/yaml.v2/LICENSE b/vendor/gopkg.in/yaml.v2/LICENSE index a68e67f0..8dada3ed 100644 --- a/vendor/gopkg.in/yaml.v2/LICENSE +++ b/vendor/gopkg.in/yaml.v2/LICENSE @@ -1,188 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ -Copyright (c) 2011-2014 - Canonical Inc. + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -This software is licensed under the LGPLv3, included below. + 1. Definitions. -As a special exception to the GNU Lesser General Public License version 3 -("LGPL3"), the copyright holders of this Library give you permission to -convey to a third party a Combined Work that links statically or dynamically -to this Library without providing any Minimal Corresponding Source or -Minimal Application Code as set out in 4d or providing the installation -information set out in section 4e, provided that you comply with the other -provisions of LGPL3 and provided that you meet, for the Application the -terms and conditions of the license(s) which apply to the Application. + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. -Except as stated in this special exception, the provisions of LGPL3 will -continue to comply in full to this Library. If you modify this Library, you -may apply this exception to your version of this Library, but you are not -obliged to do so. If you do not wish to do so, delete this exception -statement from your version. This exception does not (and cannot) modify any -license terms which apply to the Application, with which you must still -comply. + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
- GNU LESSER GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. - This version of the GNU Lesser General Public License incorporates -the terms and conditions of version 3 of the GNU General Public -License, supplemented by the additional permissions listed below. + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). - 0. Additional Definitions. + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. - As used herein, "this License" refers to version 3 of the GNU Lesser -General Public License, and the "GNU GPL" refers to version 3 of the GNU -General Public License. + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." - "The Library" refers to a covered work governed by this License, -other than an Application or a Combined Work as defined below. + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. - An "Application" is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. -Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. - A "Combined Work" is a work produced by combining or linking an -Application with the Library. The particular version of the Library -with which the Combined Work was made is also called the "Linked -Version". + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. - The "Minimal Corresponding Source" for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: - The "Corresponding Application Code" for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and - 1. Exception to Section 3 of the GNU GPL. + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and - You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and - 2. Conveying Modified Versions. 
+ (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. - If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. - a) under this License, provided that you make a good faith effort to - ensure that, in the event an Application does not supply the - function or data, the facility still operates, and performs - whatever part of its purpose remains meaningful, or + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. - b) under the GNU GPL, with none of the additional permissions of - this License applicable to that copy. + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. - 3. Object Code Incorporating Material from Library Header Files. + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. - The object code form of an Application may incorporate material from -a header file that is part of the Library. 
You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. - a) Give prominent notice with each copy of the object code that the - Library is used in it and that the Library and its use are - covered by this License. + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. - b) Accompany the object code with a copy of the GNU GPL and this license - document. + END OF TERMS AND CONDITIONS - 4. Combined Works. + APPENDIX: How to apply the Apache License to your work. - You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. - a) Give prominent notice with each copy of the Combined Work that - the Library is used in it and that the Library and its use are - covered by this License. + Copyright {yyyy} {name of copyright owner} - b) Accompany the Combined Work with a copy of the GNU GPL and this license - document. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - c) For a Combined Work that displays copyright notices during - execution, include the copyright notice for the Library among - these notices, as well as a reference directing the user to the - copies of the GNU GPL and this license document. 
+ http://www.apache.org/licenses/LICENSE-2.0 - d) Do one of the following: - - 0) Convey the Minimal Corresponding Source under the terms of this - License, and the Corresponding Application Code in a form - suitable for, and under terms that permit, the user to - recombine or relink the Application with a modified version of - the Linked Version to produce a modified Combined Work, in the - manner specified by section 6 of the GNU GPL for conveying - Corresponding Source. - - 1) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (a) uses at run time - a copy of the Library already present on the user's computer - system, and (b) will operate properly with a modified version - of the Library that is interface-compatible with the Linked - Version. - - e) Provide Installation Information, but only if you would otherwise - be required to provide such information under section 6 of the - GNU GPL, and only to the extent that such information is - necessary to install and execute a modified version of the - Combined Work produced by recombining or relinking the - Application with a modified version of the Linked Version. (If - you use option 4d0, the Installation Information must accompany - the Minimal Corresponding Source and Corresponding Application - Code. If you use option 4d1, you must provide the Installation - Information in the manner specified by section 6 of the GNU GPL - for conveying Corresponding Source.) - - 5. Combined Libraries. - - You may place library facilities that are a work based on the -Library side by side in a single library together with other library -facilities that are not Applications and are not covered by this -License, and convey such a combined library under terms of your -choice, if you do both of the following: - - a) Accompany the combined library with a copy of the same work based - on the Library, uncombined with any other library facilities, - conveyed under the terms of this License. - - b) Give prominent notice with the combined library that part of it - is a work based on the Library, and explaining where to find the - accompanying uncombined form of the same work. - - 6. Revised Versions of the GNU Lesser General Public License. - - The Free Software Foundation may publish revised and/or new versions -of the GNU Lesser General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. - - Each version is given a distinguishing version number. If the -Library as you received it specifies that a certain numbered version -of the GNU Lesser General Public License "or any later version" -applies to it, you have the option of following the terms and -conditions either of that published version or of any later version -published by the Free Software Foundation. If the Library as you -received it does not specify a version number of the GNU Lesser -General Public License, you may choose any version of the GNU Lesser -General Public License ever published by the Free Software Foundation. - - If the Library as you received it specifies that a proxy can decide -whether future versions of the GNU Lesser General Public License shall -apply, that proxy's public statement of acceptance of any version is -permanent authorization for you to choose that version for the -Library. 
+ Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/gopkg.in/yaml.v2/NOTICE b/vendor/gopkg.in/yaml.v2/NOTICE new file mode 100644 index 00000000..866d74a7 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/NOTICE @@ -0,0 +1,13 @@ +Copyright 2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/gopkg.in/yaml.v2/README.md b/vendor/gopkg.in/yaml.v2/README.md index d6c919e6..b50c6e87 100644 --- a/vendor/gopkg.in/yaml.v2/README.md +++ b/vendor/gopkg.in/yaml.v2/README.md @@ -42,7 +42,7 @@ The package API for yaml v2 will remain stable as described in [gopkg.in](https: License ------- -The yaml package is licensed under the LGPL with an exception that allows it to be linked statically. Please see the LICENSE file for details. +The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. Example @@ -65,9 +65,14 @@ b: d: [3, 4] ` +// Note: struct fields must be public in order for unmarshal to +// correctly populate the data. type T struct { A string - B struct{C int; D []int ",flow"} + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } } func main() { diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go index 95ec014e..1f7e87e6 100644 --- a/vendor/gopkg.in/yaml.v2/apic.go +++ b/vendor/gopkg.in/yaml.v2/apic.go @@ -2,7 +2,6 @@ package yaml import ( "io" - "os" ) func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { @@ -48,9 +47,9 @@ func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err return n, nil } -// File read handler. -func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { - return parser.input_file.Read(buffer) +// Reader read handler. +func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) } // Set a string input. @@ -64,12 +63,12 @@ func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { } // Set a file input. -func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) { +func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { if parser.read_handler != nil { panic("must set the input source only once") } - parser.read_handler = yaml_file_read_handler - parser.input_file = file + parser.read_handler = yaml_reader_read_handler + parser.input_reader = r } // Set the source encoding. @@ -81,14 +80,13 @@ func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { } // Create a new emitter object. 
-func yaml_emitter_initialize(emitter *yaml_emitter_t) bool { +func yaml_emitter_initialize(emitter *yaml_emitter_t) { *emitter = yaml_emitter_t{ buffer: make([]byte, output_buffer_size), raw_buffer: make([]byte, 0, output_raw_buffer_size), states: make([]yaml_emitter_state_t, 0, initial_stack_size), events: make([]yaml_event_t, 0, initial_queue_size), } - return true } // Destroy an emitter object. @@ -102,9 +100,10 @@ func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { return nil } -// File write handler. -func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - _, err := emitter.output_file.Write(buffer) +// yaml_writer_write_handler uses emitter.output_writer to write the +// emitted text. +func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_writer.Write(buffer) return err } @@ -118,12 +117,12 @@ func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]by } // Set a file output. -func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) { +func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { if emitter.write_handler != nil { panic("must set the output target only once") } - emitter.write_handler = yaml_file_write_handler - emitter.output_file = file + emitter.write_handler = yaml_writer_write_handler + emitter.output_writer = w } // Set the output encoding. @@ -252,41 +251,41 @@ func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { // // Create STREAM-START. -func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool { +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { *event = yaml_event_t{ typ: yaml_STREAM_START_EVENT, encoding: encoding, } - return true } // Create STREAM-END. -func yaml_stream_end_event_initialize(event *yaml_event_t) bool { +func yaml_stream_end_event_initialize(event *yaml_event_t) { *event = yaml_event_t{ typ: yaml_STREAM_END_EVENT, } - return true } // Create DOCUMENT-START. -func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t, - tag_directives []yaml_tag_directive_t, implicit bool) bool { +func yaml_document_start_event_initialize( + event *yaml_event_t, + version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, + implicit bool, +) { *event = yaml_event_t{ typ: yaml_DOCUMENT_START_EVENT, version_directive: version_directive, tag_directives: tag_directives, implicit: implicit, } - return true } // Create DOCUMENT-END. -func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool { +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { *event = yaml_event_t{ typ: yaml_DOCUMENT_END_EVENT, implicit: implicit, } - return true } ///* @@ -348,7 +347,7 @@ func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { } // Create MAPPING-START. -func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool { +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { *event = yaml_event_t{ typ: yaml_MAPPING_START_EVENT, anchor: anchor, @@ -356,15 +355,13 @@ func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte implicit: implicit, style: yaml_style_t(style), } - return true } // Create MAPPING-END. 
-func yaml_mapping_end_event_initialize(event *yaml_event_t) bool { +func yaml_mapping_end_event_initialize(event *yaml_event_t) { *event = yaml_event_t{ typ: yaml_MAPPING_END_EVENT, } - return true } // Destroy an event object. @@ -471,7 +468,7 @@ func yaml_event_delete(event *yaml_event_t) { // } context // tag_directive *yaml_tag_directive_t // -// context.error = YAML_NO_ERROR // Eliminate a compliler warning. +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. // // assert(document) // Non-NULL document object is expected. // diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go index ec9d2710..e4e56e28 100644 --- a/vendor/gopkg.in/yaml.v2/decode.go +++ b/vendor/gopkg.in/yaml.v2/decode.go @@ -4,6 +4,7 @@ import ( "encoding" "encoding/base64" "fmt" + "io" "math" "reflect" "strconv" @@ -22,19 +23,22 @@ type node struct { kind int line, column int tag string - value string - implicit bool - children []*node - anchors map[string]*node + // For an alias node, alias holds the resolved alias. + alias *node + value string + implicit bool + children []*node + anchors map[string]*node } // ---------------------------------------------------------------------------- // Parser, produces a node tree out of a libyaml event stream. type parser struct { - parser yaml_parser_t - event yaml_event_t - doc *node + parser yaml_parser_t + event yaml_event_t + doc *node + doneInit bool } func newParser(b []byte) *parser { @@ -42,21 +46,30 @@ func newParser(b []byte) *parser { if !yaml_parser_initialize(&p.parser) { panic("failed to initialize YAML emitter") } - if len(b) == 0 { b = []byte{'\n'} } - yaml_parser_set_input_string(&p.parser, b) - - p.skip() - if p.event.typ != yaml_STREAM_START_EVENT { - panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ))) - } - p.skip() return &p } +func newParserFromReader(r io.Reader) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + yaml_parser_set_input_reader(&p.parser, r) + return &p +} + +func (p *parser) init() { + if p.doneInit { + return + } + p.expect(yaml_STREAM_START_EVENT) + p.doneInit = true +} + func (p *parser) destroy() { if p.event.typ != yaml_NO_EVENT { yaml_event_delete(&p.event) @@ -64,16 +77,35 @@ func (p *parser) destroy() { yaml_parser_delete(&p.parser) } -func (p *parser) skip() { - if p.event.typ != yaml_NO_EVENT { - if p.event.typ == yaml_STREAM_END_EVENT { - failf("attempted to go past the end of stream; corrupted value?") +// expect consumes an event from the event stream and +// checks that it's of the expected type. +func (p *parser) expect(e yaml_event_type_t) { + if p.event.typ == yaml_NO_EVENT { + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() } - yaml_event_delete(&p.event) + } + if p.event.typ == yaml_STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + if p.event.typ != e { + p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) + p.fail() + } + yaml_event_delete(&p.event) + p.event.typ = yaml_NO_EVENT +} + +// peek peeks at the next event in the event stream, +// puts the results into p.event and returns the event type. 
+func (p *parser) peek() yaml_event_type_t { + if p.event.typ != yaml_NO_EVENT { + return p.event.typ } if !yaml_parser_parse(&p.parser, &p.event) { p.fail() } + return p.event.typ } func (p *parser) fail() { @@ -81,6 +113,10 @@ func (p *parser) fail() { var line int if p.parser.problem_mark.line != 0 { line = p.parser.problem_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } } else if p.parser.context_mark.line != 0 { line = p.parser.context_mark.line } @@ -103,7 +139,8 @@ func (p *parser) anchor(n *node, anchor []byte) { } func (p *parser) parse() *node { - switch p.event.typ { + p.init() + switch p.peek() { case yaml_SCALAR_EVENT: return p.scalar() case yaml_ALIAS_EVENT: @@ -118,9 +155,8 @@ func (p *parser) parse() *node { // Happens when attempting to decode an empty buffer. return nil default: - panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ))) + panic("attempted to parse unknown event: " + p.event.typ.String()) } - panic("unreachable") } func (p *parser) node(kind int) *node { @@ -135,19 +171,20 @@ func (p *parser) document() *node { n := p.node(documentNode) n.anchors = make(map[string]*node) p.doc = n - p.skip() + p.expect(yaml_DOCUMENT_START_EVENT) n.children = append(n.children, p.parse()) - if p.event.typ != yaml_DOCUMENT_END_EVENT { - panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ))) - } - p.skip() + p.expect(yaml_DOCUMENT_END_EVENT) return n } func (p *parser) alias() *node { n := p.node(aliasNode) n.value = string(p.event.anchor) - p.skip() + n.alias = p.doc.anchors[n.value] + if n.alias == nil { + failf("unknown anchor '%s' referenced", n.value) + } + p.expect(yaml_ALIAS_EVENT) return n } @@ -157,29 +194,29 @@ func (p *parser) scalar() *node { n.tag = string(p.event.tag) n.implicit = p.event.implicit p.anchor(n, p.event.anchor) - p.skip() + p.expect(yaml_SCALAR_EVENT) return n } func (p *parser) sequence() *node { n := p.node(sequenceNode) p.anchor(n, p.event.anchor) - p.skip() - for p.event.typ != yaml_SEQUENCE_END_EVENT { + p.expect(yaml_SEQUENCE_START_EVENT) + for p.peek() != yaml_SEQUENCE_END_EVENT { n.children = append(n.children, p.parse()) } - p.skip() + p.expect(yaml_SEQUENCE_END_EVENT) return n } func (p *parser) mapping() *node { n := p.node(mappingNode) p.anchor(n, p.event.anchor) - p.skip() - for p.event.typ != yaml_MAPPING_END_EVENT { + p.expect(yaml_MAPPING_START_EVENT) + for p.peek() != yaml_MAPPING_END_EVENT { n.children = append(n.children, p.parse(), p.parse()) } - p.skip() + p.expect(yaml_MAPPING_END_EVENT) return n } @@ -188,9 +225,10 @@ func (p *parser) mapping() *node { type decoder struct { doc *node - aliases map[string]bool + aliases map[*node]bool mapType reflect.Type terrors []string + strict bool } var ( @@ -198,11 +236,13 @@ var ( durationType = reflect.TypeOf(time.Duration(0)) defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) ifaceType = defaultMapType.Elem() + timeType = reflect.TypeOf(time.Time{}) + ptrTimeType = reflect.TypeOf(&time.Time{}) ) -func newDecoder() *decoder { - d := &decoder{mapType: defaultMapType} - d.aliases = make(map[string]bool) +func newDecoder(strict bool) *decoder { + d := &decoder{mapType: defaultMapType, strict: strict} + d.aliases = make(map[*node]bool) return d } @@ -251,7 +291,7 @@ func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { // // If n holds a null value, prepare returns before doing anything. 
func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { - if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "") { + if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) { return out, false, false } again := true @@ -308,16 +348,13 @@ func (d *decoder) document(n *node, out reflect.Value) (good bool) { } func (d *decoder) alias(n *node, out reflect.Value) (good bool) { - an, ok := d.doc.anchors[n.value] - if !ok { - failf("unknown anchor '%s' referenced", n.value) - } - if d.aliases[n.value] { + if d.aliases[n] { + // TODO this could actually be allowed in some circumstances. failf("anchor '%s' value contains itself", n.value) } - d.aliases[n.value] = true - good = d.unmarshal(an, out) - delete(d.aliases, n.value) + d.aliases[n] = true + good = d.unmarshal(n.alias, out) + delete(d.aliases, n) return good } @@ -329,7 +366,7 @@ func resetMap(out reflect.Value) { } } -func (d *decoder) scalar(n *node, out reflect.Value) (good bool) { +func (d *decoder) scalar(n *node, out reflect.Value) bool { var tag string var resolved interface{} if n.tag == "" && !n.implicit { @@ -353,9 +390,26 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) { } return true } - if s, ok := resolved.(string); ok && out.CanAddr() { - if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok { - err := u.UnmarshalText([]byte(s)) + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + // We've resolved to exactly the type we want, so use that. + out.Set(resolvedv) + return true + } + // Perhaps we can use the value as a TextUnmarshaler to + // set its value. + if out.CanAddr() { + u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) + if ok { + var text []byte + if tag == yaml_BINARY_TAG { + text = []byte(resolved.(string)) + } else { + // We let any value be unmarshaled into TextUnmarshaler. + // That might be more lax than we'd like, but the + // TextUnmarshaler itself should bowl out any dubious values. + text = []byte(n.value) + } + err := u.UnmarshalText(text) if err != nil { fail(err) } @@ -366,46 +420,54 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) { case reflect.String: if tag == yaml_BINARY_TAG { out.SetString(resolved.(string)) - good = true - } else if resolved != nil { + return true + } + if resolved != nil { out.SetString(n.value) - good = true + return true } case reflect.Interface: if resolved == nil { out.Set(reflect.Zero(out.Type())) + } else if tag == yaml_TIMESTAMP_TAG { + // It looks like a timestamp but for backward compatibility + // reasons we set it as a string, so that code that unmarshals + // timestamp-like values into interface{} will continue to + // see a string and not a time.Time. + // TODO(v3) Drop this. 
+ out.Set(reflect.ValueOf(n.value)) } else { out.Set(reflect.ValueOf(resolved)) } - good = true + return true case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: switch resolved := resolved.(type) { case int: if !out.OverflowInt(int64(resolved)) { out.SetInt(int64(resolved)) - good = true + return true } case int64: if !out.OverflowInt(resolved) { out.SetInt(resolved) - good = true + return true } case uint64: if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { out.SetInt(int64(resolved)) - good = true + return true } case float64: if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { out.SetInt(int64(resolved)) - good = true + return true } case string: if out.Type() == durationType { d, err := time.ParseDuration(resolved) if err == nil { out.SetInt(int64(d)) - good = true + return true } } } @@ -414,44 +476,49 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) { case int: if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { out.SetUint(uint64(resolved)) - good = true + return true } case int64: if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { out.SetUint(uint64(resolved)) - good = true + return true } case uint64: if !out.OverflowUint(uint64(resolved)) { out.SetUint(uint64(resolved)) - good = true + return true } case float64: if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { out.SetUint(uint64(resolved)) - good = true + return true } } case reflect.Bool: switch resolved := resolved.(type) { case bool: out.SetBool(resolved) - good = true + return true } case reflect.Float32, reflect.Float64: switch resolved := resolved.(type) { case int: out.SetFloat(float64(resolved)) - good = true + return true case int64: out.SetFloat(float64(resolved)) - good = true + return true case uint64: out.SetFloat(float64(resolved)) - good = true + return true case float64: out.SetFloat(resolved) - good = true + return true + } + case reflect.Struct: + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + out.Set(resolvedv) + return true } case reflect.Ptr: if out.Type().Elem() == reflect.TypeOf(resolved) { @@ -459,13 +526,11 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) { elem := reflect.New(out.Type().Elem()) elem.Elem().Set(reflect.ValueOf(resolved)) out.Set(elem) - good = true + return true } } - if !good { - d.terror(n, tag, out) - } - return good + d.terror(n, tag, out) + return false } func settableValueOf(i interface{}) reflect.Value { @@ -482,6 +547,10 @@ func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { switch out.Kind() { case reflect.Slice: out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Array: + if l != out.Len() { + failf("invalid array: want %d elements but got %d", out.Len(), l) + } case reflect.Interface: // No type hints. Will have to use a generic sequence. 
iface = out @@ -500,7 +569,9 @@ func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { j++ } } - out.Set(out.Slice(0, j)) + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } if iface.IsValid() { iface.Set(out) } @@ -561,7 +632,7 @@ func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { } e := reflect.New(et).Elem() if d.unmarshal(n.children[i+1], e) { - out.SetMapIndex(k, e) + d.setMapIndex(n.children[i+1], out, k, e) } } } @@ -569,6 +640,14 @@ func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { return true } +func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) { + if d.strict && out.MapIndex(k) != zeroValue { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface())) + return + } + out.SetMapIndex(k, v) +} + func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { outt := out.Type() if outt.Elem() != mapItemType { @@ -607,6 +686,19 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { } name := settableValueOf("") l := len(n.children) + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) + elemType = inlineMap.Type().Elem() + } + + var doneFields []bool + if d.strict { + doneFields = make([]bool, len(sinfo.FieldsList)) + } for i := 0; i < l; i += 2 { ni := n.children[i] if isMerge(ni) { @@ -617,6 +709,13 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { continue } if info, ok := sinfo.FieldsMap[name.String()]; ok { + if d.strict { + if doneFields[info.Id] { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type())) + continue + } + doneFields[info.Id] = true + } var field reflect.Value if info.Inline == nil { field = out.Field(info.Num) @@ -624,6 +723,15 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { field = out.FieldByIndex(info.Inline) } d.unmarshal(n.children[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.children[i+1], value) + d.setMapIndex(n.children[i+1], inlineMap, name, value) + } else if d.strict { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type())) } } return true diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/gopkg.in/yaml.v2/emitterc.go index 9b3dc4a4..a1c2cc52 100644 --- a/vendor/gopkg.in/yaml.v2/emitterc.go +++ b/vendor/gopkg.in/yaml.v2/emitterc.go @@ -2,6 +2,7 @@ package yaml import ( "bytes" + "fmt" ) // Flush the buffer if needed. @@ -664,9 +665,8 @@ func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, return yaml_emitter_emit_mapping_start(emitter, event) default: return yaml_emitter_set_emitter_error(emitter, - "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS") + fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) } - return false } // Expect ALIAS. @@ -843,7 +843,7 @@ func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event return true } -// Write an achor. +// Write an anchor. 
func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { if emitter.anchor_data.anchor == nil { return true @@ -995,10 +995,10 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { break_space = false space_break = false - preceeded_by_whitespace = false - followed_by_whitespace = false - previous_space = false - previous_break = false + preceded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false ) emitter.scalar_data.value = value @@ -1017,9 +1017,9 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { flow_indicators = true } - preceeded_by_whitespace = true + preceded_by_whitespace = true for i, w := 0, 0; i < len(value); i += w { - w = width(value[0]) + w = width(value[i]) followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) if i == 0 { @@ -1048,7 +1048,7 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { block_indicators = true } case '#': - if preceeded_by_whitespace { + if preceded_by_whitespace { flow_indicators = true block_indicators = true } @@ -1089,7 +1089,7 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { } // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. - preceeded_by_whitespace = is_blankz(value, i) + preceded_by_whitespace = is_blankz(value, i) } emitter.scalar_data.multiline = line_breaks diff --git a/vendor/gopkg.in/yaml.v2/encode.go b/vendor/gopkg.in/yaml.v2/encode.go index b7edc799..a14435e8 100644 --- a/vendor/gopkg.in/yaml.v2/encode.go +++ b/vendor/gopkg.in/yaml.v2/encode.go @@ -2,12 +2,15 @@ package yaml import ( "encoding" + "fmt" + "io" "reflect" "regexp" "sort" "strconv" "strings" "time" + "unicode/utf8" ) type encoder struct { @@ -15,25 +18,39 @@ type encoder struct { event yaml_event_t out []byte flow bool + // doneInit holds whether the initial stream_start_event has been + // emitted. + doneInit bool } -func newEncoder() (e *encoder) { - e = &encoder{} - e.must(yaml_emitter_initialize(&e.emitter)) +func newEncoder() *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) yaml_emitter_set_output_string(&e.emitter, &e.out) yaml_emitter_set_unicode(&e.emitter, true) - e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)) - e.emit() - e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true)) - e.emit() return e } -func (e *encoder) finish() { - e.must(yaml_document_end_event_initialize(&e.event, true)) +func newEncoderWithWriter(w io.Writer) *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_writer(&e.emitter, w) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func (e *encoder) init() { + if e.doneInit { + return + } + yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) e.emit() + e.doneInit = true +} + +func (e *encoder) finish() { e.emitter.open_ended = false - e.must(yaml_stream_end_event_initialize(&e.event)) + yaml_stream_end_event_initialize(&e.event) e.emit() } @@ -43,9 +60,7 @@ func (e *encoder) destroy() { func (e *encoder) emit() { // This will internally delete the e.event value. 
- if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT { - e.must(false) - } + e.must(yaml_emitter_emit(&e.emitter, &e.event)) } func (e *encoder) must(ok bool) { @@ -58,13 +73,28 @@ func (e *encoder) must(ok bool) { } } +func (e *encoder) marshalDoc(tag string, in reflect.Value) { + e.init() + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.emit() + e.marshal(tag, in) + yaml_document_end_event_initialize(&e.event, true) + e.emit() +} + func (e *encoder) marshal(tag string, in reflect.Value) { - if !in.IsValid() { + if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { e.nilv() return } iface := in.Interface() - if m, ok := iface.(Marshaler); ok { + switch m := iface.(type) { + case time.Time, *time.Time: + // Although time.Time implements TextMarshaler, + // we don't want to treat it as a string for YAML + // purposes because YAML has special support for + // timestamps. + case Marshaler: v, err := m.MarshalYAML() if err != nil { fail(err) @@ -74,31 +104,34 @@ func (e *encoder) marshal(tag string, in reflect.Value) { return } in = reflect.ValueOf(v) - } else if m, ok := iface.(encoding.TextMarshaler); ok { + case encoding.TextMarshaler: text, err := m.MarshalText() if err != nil { fail(err) } in = reflect.ValueOf(string(text)) + case nil: + e.nilv() + return } switch in.Kind() { case reflect.Interface: - if in.IsNil() { - e.nilv() - } else { - e.marshal(tag, in.Elem()) - } + e.marshal(tag, in.Elem()) case reflect.Map: e.mapv(tag, in) case reflect.Ptr: - if in.IsNil() { - e.nilv() + if in.Type() == ptrTimeType { + e.timev(tag, in.Elem()) } else { e.marshal(tag, in.Elem()) } case reflect.Struct: - e.structv(tag, in) - case reflect.Slice: + if in.Type() == timeType { + e.timev(tag, in) + } else { + e.structv(tag, in) + } + case reflect.Slice, reflect.Array: if in.Type().Elem() == mapItemType { e.itemsv(tag, in) } else { @@ -164,6 +197,22 @@ func (e *encoder) structv(tag string, in reflect.Value) { e.flow = info.Flow e.marshal("", value) } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } }) } @@ -174,10 +223,10 @@ func (e *encoder) mappingv(tag string, f func()) { e.flow = false style = yaml_FLOW_MAPPING_STYLE } - e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) e.emit() f() - e.must(yaml_mapping_end_event_initialize(&e.event)) + yaml_mapping_end_event_initialize(&e.event) e.emit() } @@ -223,23 +272,36 @@ var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0 func (e *encoder) stringv(tag string, in reflect.Value) { var style yaml_scalar_style_t s := in.String() - rtag, rs := resolve("", s) - if rtag == yaml_BINARY_TAG { - if tag == "" || tag == yaml_STR_TAG { - tag = rtag - s = rs.(string) - } else if tag == yaml_BINARY_TAG { + canUsePlain := true + switch { + case !utf8.ValidString(s): + if tag == yaml_BINARY_TAG { failf("explicitly tagged !!binary data must be base64-encoded") - } else { + } + if tag != "" { failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) } + // It can't be 
encoded directly as YAML so use a binary tag + // and encode it as base64. + tag = yaml_BINARY_TAG + s = encodeBase64(s) + case tag == "": + // Check to see if it would resolve to a specific + // tag when encoded unquoted. If it doesn't, + // there's no need to quote it. + rtag, _ := resolve("", s) + canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s) } - if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } else if strings.Contains(s, "\n") { + // Note: it's possible for user code to emit invalid YAML + // if they explicitly specify a tag and a string containing + // text that's incompatible with that tag. + switch { + case strings.Contains(s, "\n"): style = yaml_LITERAL_SCALAR_STYLE - } else { + case canUsePlain: style = yaml_PLAIN_SCALAR_STYLE + default: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE } e.emitScalar(s, "", tag, style) } @@ -264,9 +326,20 @@ func (e *encoder) uintv(tag string, in reflect.Value) { e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) } +func (e *encoder) timev(tag string, in reflect.Value) { + t := in.Interface().(time.Time) + s := t.Format(time.RFC3339Nano) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + func (e *encoder) floatv(tag string, in reflect.Value) { - // FIXME: Handle 64 bits here. - s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32) + // Issue #352: When formatting, use the precision of the underlying value + precision := 64 + if in.Kind() == reflect.Float32 { + precision = 32 + } + + s := strconv.FormatFloat(in.Float(), 'g', -1, precision) switch s { case "+Inf": s = ".inf" diff --git a/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/gopkg.in/yaml.v2/parserc.go index 0a7037ad..81d05dfe 100644 --- a/vendor/gopkg.in/yaml.v2/parserc.go +++ b/vendor/gopkg.in/yaml.v2/parserc.go @@ -166,7 +166,6 @@ func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool default: panic("invalid parser state") } - return false } // Parse the production: diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/gopkg.in/yaml.v2/readerc.go index d5fb0972..7c1f5fac 100644 --- a/vendor/gopkg.in/yaml.v2/readerc.go +++ b/vendor/gopkg.in/yaml.v2/readerc.go @@ -93,9 +93,18 @@ func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { panic("read handler must be set") } + // [Go] This function was changed to guarantee the requested length size at EOF. + // The fact we need to do this is pretty awful, but the description above implies + // for that to be the case, and there are tests + // If the EOF flag is set and the raw buffer is empty, do nothing. if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { - return true + // [Go] ACTUALLY! Read the documentation of this function above. + // This is just broken. To return true, we need to have the + // given length in the buffer. Not doing that means every single + // check that calls this function to make sure the buffer has a + // given length is Go) panicking; or C) accessing invalid memory. + //return true } // Return if the buffer contains enough characters. @@ -247,7 +256,7 @@ func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { if parser.encoding == yaml_UTF16LE_ENCODING { low, high = 0, 1 } else { - high, low = 1, 0 + low, high = 1, 0 } // The UTF-16 encoding is not as simple as one might @@ -357,23 +366,26 @@ func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { if value <= 0x7F { // 0000 0000-0000 007F . 
0xxxxxxx parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 } else if value <= 0x7FF { // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 } else if value <= 0xFFFF { // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 } else { // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 } - buffer_len += width parser.unread++ } @@ -386,6 +398,15 @@ func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { break } } + // [Go] Read the documentation of this function above. To return true, + // we need to have the given length in the buffer. Not doing that means + // every single check that calls this function to make sure the buffer + // has a given length is Go) panicking; or C) accessing invalid memory. + // This happens here due to the EOF above breaking early. + for buffer_len < length { + parser.buffer[buffer_len] = 0 + buffer_len++ + } parser.buffer = parser.buffer[:buffer_len] return true } diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go index 93a86327..6c151db6 100644 --- a/vendor/gopkg.in/yaml.v2/resolve.go +++ b/vendor/gopkg.in/yaml.v2/resolve.go @@ -3,9 +3,10 @@ package yaml import ( "encoding/base64" "math" + "regexp" "strconv" "strings" - "unicode/utf8" + "time" ) type resolveMapItem struct { @@ -74,12 +75,14 @@ func longTag(tag string) string { func resolvableTag(tag string) bool { switch tag { - case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG: + case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG: return true } return false } +var yamlStyleFloat = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`) + func resolve(tag string, in string) (rtag string, out interface{}) { if !resolvableTag(tag) { return tag, in @@ -89,6 +92,19 @@ func resolve(tag string, in string) (rtag string, out interface{}) { switch tag { case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: return + case yaml_FLOAT_TAG: + if rtag == yaml_INT_TAG { + switch v := out.(type) { + case int64: + rtag = yaml_FLOAT_TAG + out = float64(v) + return + case int: + rtag = yaml_FLOAT_TAG + out = float64(v) + return + } + } } failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) }() @@ -122,6 +138,15 @@ func resolve(tag string, in string) (rtag string, out interface{}) { case 'D', 'S': // Int, float, or timestamp. + // Only try values as a timestamp if the value is unquoted or there's an explicit + // !!timestamp tag. 
+ if tag == "" || tag == yaml_TIMESTAMP_TAG { + t, ok := parseTimestamp(in) + if ok { + return yaml_TIMESTAMP_TAG, t + } + } + plain := strings.Replace(in, "_", "", -1) intv, err := strconv.ParseInt(plain, 0, 64) if err == nil { @@ -135,9 +160,11 @@ func resolve(tag string, in string) (rtag string, out interface{}) { if err == nil { return yaml_INT_TAG, uintv } - floatv, err := strconv.ParseFloat(plain, 64) - if err == nil { - return yaml_FLOAT_TAG, floatv + if yamlStyleFloat.MatchString(plain) { + floatv, err := strconv.ParseFloat(plain, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } } if strings.HasPrefix(plain, "0b") { intv, err := strconv.ParseInt(plain[2:], 2, 64) @@ -153,28 +180,20 @@ func resolve(tag string, in string) (rtag string, out interface{}) { return yaml_INT_TAG, uintv } } else if strings.HasPrefix(plain, "-0b") { - intv, err := strconv.ParseInt(plain[3:], 2, 64) + intv, err := strconv.ParseInt("-" + plain[3:], 2, 64) if err == nil { - if intv == int64(int(intv)) { - return yaml_INT_TAG, -int(intv) + if true || intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) } else { - return yaml_INT_TAG, -intv + return yaml_INT_TAG, intv } } } - // XXX Handle timestamps here. - default: panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") } } - if tag == yaml_BINARY_TAG { - return yaml_BINARY_TAG, in - } - if utf8.ValidString(in) { - return yaml_STR_TAG, in - } - return yaml_BINARY_TAG, encodeBase64(in) + return yaml_STR_TAG, in } // encodeBase64 encodes s as base64 that is broken up into multiple lines @@ -201,3 +220,39 @@ func encodeBase64(s string) string { } return string(out[:k]) } + +// This is a subset of the formats allowed by the regular expression +// defined at http://yaml.org/type/timestamp.html. +var allowedTimestampFormats = []string{ + "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. + "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". + "2006-1-2 15:4:5.999999999", // space separated with no time zone + "2006-1-2", // date only + // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" + // from the set of examples. +} + +// parseTimestamp parses s as a timestamp string and +// returns the timestamp and reports whether it succeeded. +// Timestamp formats are defined at http://yaml.org/type/timestamp.html +func parseTimestamp(s string) (time.Time, bool) { + // TODO write code to check all the formats supported by + // http://yaml.org/type/timestamp.html instead of using time.Parse. + + // Quick check: all date formats start with YYYY-. + i := 0 + for ; i < len(s); i++ { + if c := s[i]; c < '0' || c > '9' { + break + } + } + if i != 4 || i == len(s) || s[i] != '-' { + return time.Time{}, false + } + for _, format := range allowedTimestampFormats { + if t, err := time.Parse(format, s); err == nil { + return t, true + } + } + return time.Time{}, false +} diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go index fe93b190..077fd1dd 100644 --- a/vendor/gopkg.in/yaml.v2/scannerc.go +++ b/vendor/gopkg.in/yaml.v2/scannerc.go @@ -9,7 +9,7 @@ import ( // ************ // // The following notes assume that you are familiar with the YAML specification -// (http://yaml.org/spec/cvs/current.html). We mostly follow it, although in +// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in // some cases we are less restrictive that it requires. 
// // The process of transforming a YAML stream into a sequence of events is @@ -611,7 +611,7 @@ func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, co if directive { context = "while parsing a %TAG directive" } - return yaml_parser_set_scanner_error(parser, context, context_mark, "did not find URI escaped octet") + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) } func trace(args ...interface{}) func() { @@ -871,12 +871,6 @@ func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { required := parser.flow_level == 0 && parser.indent == parser.mark.column - // A simple key is required only when it is the first token in the current - // line. Therefore it is always allowed. But we add a check anyway. - if required && !parser.simple_key_allowed { - panic("should not happen") - } - // // If the current position may start a simple key, save it. // @@ -961,7 +955,7 @@ func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml } // Pop indentation levels from the indents stack until the current level -// becomes less or equal to the column. For each intendation level, append +// becomes less or equal to the column. For each indentation level, append // the BLOCK-END token. func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { // In the flow context, do nothing. @@ -969,7 +963,7 @@ func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { return true } - // Loop through the intendation levels in the stack. + // Loop through the indentation levels in the stack. for parser.indent > column { // Create a token and append it to the queue. token := yaml_token_t{ @@ -1546,7 +1540,7 @@ func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool // Unknown directive. } else { yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "found uknown directive name") + start_mark, "found unknown directive name") return false } @@ -1944,7 +1938,7 @@ func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_ma } else { // It's either the '!' tag or not really a tag handle. If it's a %TAG // directive, it's an error. If it's a tag token, it must be a part of URI. - if directive && !(s[0] == '!' && s[1] == 0) { + if directive && string(s) != "!" { yaml_parser_set_scanner_tag_error(parser, directive, start_mark, "did not find expected '!'") return false @@ -1959,6 +1953,7 @@ func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_ma func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { //size_t length = head ? strlen((char *)head) : 0 var s []byte + hasTag := len(head) > 0 // Copy the head if needed. // @@ -2000,10 +1995,10 @@ func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } + hasTag = true } - // Check if the tag is non-empty. - if len(s) == 0 { + if !hasTag { yaml_parser_set_scanner_tag_error(parser, directive, start_mark, "did not find expected tag URI") return false @@ -2085,14 +2080,14 @@ func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, l return false } if is_digit(parser.buffer, parser.buffer_pos) { - // Check that the intendation is greater than 0. + // Check that the indentation is greater than 0. 
if parser.buffer[parser.buffer_pos] == '0' { yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an intendation indicator equal to 0") + start_mark, "found an indentation indicator equal to 0") return false } - // Get the intendation level and eat the indicator. + // Get the indentation level and eat the indicator. increment = as_digit(parser.buffer, parser.buffer_pos) skip(parser) } @@ -2102,7 +2097,7 @@ func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, l if parser.buffer[parser.buffer_pos] == '0' { yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an intendation indicator equal to 0") + start_mark, "found an indentation indicator equal to 0") return false } increment = as_digit(parser.buffer, parser.buffer_pos) @@ -2157,7 +2152,7 @@ func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, l end_mark := parser.mark - // Set the intendation level if it was specified. + // Set the indentation level if it was specified. var indent int if increment > 0 { if parser.indent >= 0 { @@ -2217,7 +2212,7 @@ func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, l leading_break = read_line(parser, leading_break) - // Eat the following intendation spaces and line breaks. + // Eat the following indentation spaces and line breaks. if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { return false } @@ -2245,15 +2240,15 @@ func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, l return true } -// Scan intendation spaces and line breaks for a block scalar. Determine the -// intendation level if needed. +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { *end_mark = parser.mark - // Eat the intendation spaces and line breaks. + // Eat the indentation spaces and line breaks. max_indent := 0 for { - // Eat the intendation spaces. + // Eat the indentation spaces. if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } @@ -2267,10 +2262,10 @@ func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, br max_indent = parser.mark.column } - // Check for a tab character messing the intendation. + // Check for a tab character messing the indentation. if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found a tab character where an intendation space is expected") + start_mark, "found a tab character where an indentation space is expected") } // Have we found a non-empty line? @@ -2474,6 +2469,10 @@ func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, si } } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + // Check if we are at the end of the scalar. if single { if parser.buffer[parser.buffer_pos] == '\'' { @@ -2486,10 +2485,6 @@ func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, si } // Consume blank characters. 
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { if is_blank(parser.buffer, parser.buffer_pos) { // Consume a space or a tab character. @@ -2591,19 +2586,10 @@ func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) b // Consume non-blank characters. for !is_blankz(parser.buffer, parser.buffer_pos) { - // Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13". - if parser.flow_level > 0 && - parser.buffer[parser.buffer_pos] == ':' && - !is_blankz(parser.buffer, parser.buffer_pos+1) { - yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", - start_mark, "found unexpected ':'") - return false - } - // Check for indicators that may end a plain scalar. if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || (parser.flow_level > 0 && - (parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' || + (parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || parser.buffer[parser.buffer_pos] == '}')) { @@ -2655,10 +2641,10 @@ func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) b for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { if is_blank(parser.buffer, parser.buffer_pos) { - // Check for tab character that abuse intendation. + // Check for tab characters that abuse indentation. if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", - start_mark, "found a tab character that violate intendation") + start_mark, "found a tab character that violates indentation") return false } @@ -2687,7 +2673,7 @@ func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) b } } - // Check intendation level. + // Check indentation level. if parser.flow_level == 0 && parser.mark.column < indent { break } diff --git a/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/gopkg.in/yaml.v2/sorter.go index 5958822f..4c45e660 100644 --- a/vendor/gopkg.in/yaml.v2/sorter.go +++ b/vendor/gopkg.in/yaml.v2/sorter.go @@ -51,6 +51,15 @@ func (l keyList) Less(i, j int) bool { } var ai, bi int var an, bn int64 + if ar[i] == '0' || br[i] == '0' { + for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + if ar[j] != '0' { + an = 1 + bn = 1 + break + } + } + } for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { an = an*10 + int64(ar[ai]-'0') } diff --git a/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/gopkg.in/yaml.v2/writerc.go index 190362f2..a2dde608 100644 --- a/vendor/gopkg.in/yaml.v2/writerc.go +++ b/vendor/gopkg.in/yaml.v2/writerc.go @@ -18,72 +18,9 @@ func yaml_emitter_flush(emitter *yaml_emitter_t) bool { return true } - // If the output encoding is UTF-8, we don't need to recode the buffer. - if emitter.encoding == yaml_UTF8_ENCODING { - if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { - return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) - } - emitter.buffer_pos = 0 - return true - } - - // Recode the buffer into the raw buffer. 
- var low, high int - if emitter.encoding == yaml_UTF16LE_ENCODING { - low, high = 0, 1 - } else { - high, low = 1, 0 - } - - pos := 0 - for pos < emitter.buffer_pos { - // See the "reader.c" code for more details on UTF-8 encoding. Note - // that we assume that the buffer contains a valid UTF-8 sequence. - - // Read the next UTF-8 character. - octet := emitter.buffer[pos] - - var w int - var value rune - switch { - case octet&0x80 == 0x00: - w, value = 1, rune(octet&0x7F) - case octet&0xE0 == 0xC0: - w, value = 2, rune(octet&0x1F) - case octet&0xF0 == 0xE0: - w, value = 3, rune(octet&0x0F) - case octet&0xF8 == 0xF0: - w, value = 4, rune(octet&0x07) - } - for k := 1; k < w; k++ { - octet = emitter.buffer[pos+k] - value = (value << 6) + (rune(octet) & 0x3F) - } - pos += w - - // Write the character. - if value < 0x10000 { - var b [2]byte - b[high] = byte(value >> 8) - b[low] = byte(value & 0xFF) - emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1]) - } else { - // Write the character using a surrogate pair (check "reader.c"). - var b [4]byte - value -= 0x10000 - b[high] = byte(0xD8 + (value >> 18)) - b[low] = byte((value >> 10) & 0xFF) - b[high+2] = byte(0xDC + ((value >> 8) & 0xFF)) - b[low+2] = byte(value & 0xFF) - emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3]) - } - } - - // Write the raw buffer. - if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil { + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) } emitter.buffer_pos = 0 - emitter.raw_buffer = emitter.raw_buffer[:0] return true } diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go index e3e01edc..de85aa4c 100644 --- a/vendor/gopkg.in/yaml.v2/yaml.go +++ b/vendor/gopkg.in/yaml.v2/yaml.go @@ -9,6 +9,7 @@ package yaml import ( "errors" "fmt" + "io" "reflect" "strings" "sync" @@ -77,8 +78,65 @@ type Marshaler interface { // supported tag options. // func Unmarshal(in []byte, out interface{}) (err error) { + return unmarshal(in, out, false) +} + +// UnmarshalStrict is like Unmarshal except that any fields that are found +// in the data that do not have corresponding struct members, or mapping +// keys that are duplicates, will result in +// an error. +func UnmarshalStrict(in []byte, out interface{}) (err error) { + return unmarshal(in, out, true) +} + +// A Decorder reads and decodes YAML values from an input stream. +type Decoder struct { + strict bool + parser *parser +} + +// NewDecoder returns a new decoder that reads from r. +// +// The decoder introduces its own buffering and may read +// data from r beyond the YAML values requested. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{ + parser: newParserFromReader(r), + } +} + +// SetStrict sets whether strict decoding behaviour is enabled when +// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict. +func (dec *Decoder) SetStrict(strict bool) { + dec.strict = strict +} + +// Decode reads the next YAML-encoded value from its input +// and stores it in the value pointed to by v. +// +// See the documentation for Unmarshal for details about the +// conversion of YAML into a Go value. 
+func (dec *Decoder) Decode(v interface{}) (err error) { + d := newDecoder(dec.strict) defer handleErr(&err) - d := newDecoder() + node := dec.parser.parse() + if node == nil { + return io.EOF + } + out := reflect.ValueOf(v) + if out.Kind() == reflect.Ptr && !out.IsNil() { + out = out.Elem() + } + d.unmarshal(node, out) + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +func unmarshal(in []byte, out interface{}, strict bool) (err error) { + defer handleErr(&err) + d := newDecoder(strict) p := newParser(in) defer p.destroy() node := p.parse() @@ -99,8 +157,8 @@ func Unmarshal(in []byte, out interface{}) (err error) { // of the generated document will reflect the structure of the value itself. // Maps and pointers (to struct, string, int, etc) are accepted as the in value. // -// Struct fields are only unmarshalled if they are exported (have an upper case -// first letter), and are unmarshalled using the field name lowercased as the +// Struct fields are only marshalled if they are exported (have an upper case +// first letter), and are marshalled using the field name lowercased as the // default key. Custom keys may be defined via the "yaml" name in the field // tag: the content preceding the first comma is used as the key, and the // following comma-separated options are used to tweak the marshalling process. @@ -114,21 +172,25 @@ func Unmarshal(in []byte, out interface{}) (err error) { // // omitempty Only include the field if it's not set to the zero // value for the type or to empty slices or maps. -// Does not apply to zero valued structs. +// Zero valued structs will be omitted if all their public +// fields are zero, unless they implement an IsZero +// method (see the IsZeroer interface type), in which +// case the field will be included if that method returns true. // // flow Marshal using a flow style (useful for structs, -// sequences and maps. +// sequences and maps). // -// inline Inline the struct it's applied to, so its fields -// are processed as if they were part of the outer -// struct. +// inline Inline the field, which must be a struct or a map, +// causing all of its fields or keys to be processed as if +// they were part of the outer struct. For maps, keys must +// not conflict with the yaml keys of other struct fields. // // In addition, if the key is "-", the field is ignored. // // For example: // // type T struct { -// F int "a,omitempty" +// F int `yaml:"a,omitempty"` // B int // } // yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" @@ -138,12 +200,47 @@ func Marshal(in interface{}) (out []byte, err error) { defer handleErr(&err) e := newEncoder() defer e.destroy() - e.marshal("", reflect.ValueOf(in)) + e.marshalDoc("", reflect.ValueOf(in)) e.finish() out = e.out return } +// An Encoder writes YAML values to an output stream. +type Encoder struct { + encoder *encoder +} + +// NewEncoder returns a new encoder that writes to w. +// The Encoder should be closed after use to flush all data +// to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + encoder: newEncoderWithWriter(w), + } +} + +// Encode writes the YAML encoding of v to the stream. +// If multiple items are encoded to the stream, the +// second and subsequent document will be preceded +// with a "---" document separator, but the first will not. +// +// See the documentation for Marshal for details about the conversion of Go +// values to YAML. 
+func (e *Encoder) Encode(v interface{}) (err error) { + defer handleErr(&err) + e.encoder.marshalDoc("", reflect.ValueOf(v)) + return nil +} + +// Close closes the encoder by writing any remaining data. +// It does not write a stream terminating string "...". +func (e *Encoder) Close() (err error) { + defer handleErr(&err) + e.encoder.finish() + return nil +} + func handleErr(err *error) { if v := recover(); v != nil { if e, ok := v.(yamlError); ok { @@ -199,6 +296,9 @@ type fieldInfo struct { Num int OmitEmpty bool Flow bool + // Id holds the unique field identifier, so we can cheaply + // check for field duplicates without maintaining an extra map. + Id int // Inline holds the field index if the field is part of an inlined struct. Inline []int @@ -221,7 +321,7 @@ func getStructInfo(st reflect.Type) (*structInfo, error) { inlineMap := -1 for i := 0; i != n; i++ { field := st.Field(i) - if field.PkgPath != "" { + if field.PkgPath != "" && !field.Anonymous { continue // Private field } @@ -255,15 +355,14 @@ func getStructInfo(st reflect.Type) (*structInfo, error) { if inline { switch field.Type.Kind() { - // TODO: Implement support for inline maps. - //case reflect.Map: - // if inlineMap >= 0 { - // return nil, errors.New("Multiple ,inline maps in struct " + st.String()) - // } - // if field.Type.Key() != reflect.TypeOf("") { - // return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) - // } - // inlineMap = info.Num + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num case reflect.Struct: sinfo, err := getStructInfo(field.Type) if err != nil { @@ -279,6 +378,7 @@ func getStructInfo(st reflect.Type) (*structInfo, error) { } else { finfo.Inline = append([]int{i}, finfo.Inline...) } + finfo.Id = len(fieldsList) fieldsMap[finfo.Key] = finfo fieldsList = append(fieldsList, finfo) } @@ -300,11 +400,16 @@ func getStructInfo(st reflect.Type) (*structInfo, error) { return nil, errors.New(msg) } + info.Id = len(fieldsList) fieldsList = append(fieldsList, info) fieldsMap[info.Key] = info } - sinfo = &structInfo{fieldsMap, fieldsList, inlineMap} + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + } fieldMapMutex.Lock() structMap[st] = sinfo @@ -312,8 +417,23 @@ func getStructInfo(st reflect.Type) (*structInfo, error) { return sinfo, nil } +// IsZeroer is used to check whether an object is zero to +// determine whether it should be omitted when marshaling +// with the omitempty flag. One notable implementation +// is time.Time. 
+type IsZeroer interface { + IsZero() bool +} + func isZero(v reflect.Value) bool { - switch v.Kind() { + kind := v.Kind() + if z, ok := v.Interface().(IsZeroer); ok { + if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { + return true + } + return z.IsZero() + } + switch kind { case reflect.String: return len(v.String()) == 0 case reflect.Interface, reflect.Ptr: @@ -324,13 +444,15 @@ func isZero(v reflect.Value) bool { return v.Len() == 0 case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: return v.Uint() == 0 case reflect.Bool: return !v.Bool() case reflect.Struct: vt := v.Type() - for i := v.NumField()-1; i >= 0; i-- { + for i := v.NumField() - 1; i >= 0; i-- { if vt.Field(i).PkgPath != "" { continue // Private field } diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v2/yamlh.go index d60a6b6b..e25cee56 100644 --- a/vendor/gopkg.in/yaml.v2/yamlh.go +++ b/vendor/gopkg.in/yaml.v2/yamlh.go @@ -1,6 +1,7 @@ package yaml import ( + "fmt" "io" ) @@ -239,6 +240,27 @@ const ( yaml_MAPPING_END_EVENT // A MAPPING-END event. ) +var eventStrings = []string{ + yaml_NO_EVENT: "none", + yaml_STREAM_START_EVENT: "stream start", + yaml_STREAM_END_EVENT: "stream end", + yaml_DOCUMENT_START_EVENT: "document start", + yaml_DOCUMENT_END_EVENT: "document end", + yaml_ALIAS_EVENT: "alias", + yaml_SCALAR_EVENT: "scalar", + yaml_SEQUENCE_START_EVENT: "sequence start", + yaml_SEQUENCE_END_EVENT: "sequence end", + yaml_MAPPING_START_EVENT: "mapping start", + yaml_MAPPING_END_EVENT: "mapping end", +} + +func (e yaml_event_type_t) String() string { + if e < 0 || int(e) >= len(eventStrings) { + return fmt.Sprintf("unknown event %d", e) + } + return eventStrings[e] +} + // The event structure. type yaml_event_t struct { @@ -508,7 +530,7 @@ type yaml_parser_t struct { problem string // Error description. - // The byte about which the problem occured. + // The byte about which the problem occurred. problem_offset int problem_value int problem_mark yaml_mark_t @@ -521,9 +543,9 @@ type yaml_parser_t struct { read_handler yaml_read_handler_t // Read handler. - input_file io.Reader // File input data. - input []byte // String input data. - input_pos int + input_reader io.Reader // File input data. + input []byte // String input data. + input_pos int eof bool // EOF flag @@ -632,7 +654,7 @@ type yaml_emitter_t struct { write_handler yaml_write_handler_t // Write handler. output_buffer *[]byte // String output data. - output_file io.Writer // File output data. + output_writer io.Writer // File output data. buffer []byte // The working buffer. buffer_pos int // The current position of the buffer. From 40efb602d68b76122e73be251a7555ba82b34c33 Mon Sep 17 00:00:00 2001 From: Manish Tomar Date: Mon, 30 Jul 2018 16:15:35 -0700 Subject: [PATCH 141/276] Add support to gracefully shutdown the server This is done by draining the connections for configured time after registry receives a SIGTERM signal. This adds a `draintimeout` setting under `HTTP`. Registry doesn't drain if draintimeout is not provided. 
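Condensed, the patch applies the standard Go 1.8 `http.Server.Shutdown` pattern; a hedged sketch with placeholder values standing in for the registry's actual wiring:

```go
package main

import (
	"context"
	"log"
	"net/http"
	"os"
	"os/signal"
	"syscall"
	"time"
)

func main() {
	srv := &http.Server{Addr: ":5000"}
	drainTimeout := 60 * time.Second // stand-in for the draintimeout setting

	quit := make(chan os.Signal, 1)
	signal.Notify(quit, syscall.SIGTERM)

	serveErr := make(chan error, 1)
	go func() { serveErr <- srv.ListenAndServe() }()

	select {
	case err := <-serveErr:
		log.Fatal(err) // the listener failed on its own
	case <-quit:
		// Stop accepting new connections, then wait up to
		// drainTimeout for in-flight requests to finish.
		ctx, cancel := context.WithTimeout(context.Background(), drainTimeout)
		defer cancel()
		if err := srv.Shutdown(ctx); err != nil {
			log.Fatal(err)
		}
	}
}
```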
Signed-off-by: Manish Tomar --- configuration/configuration.go | 4 ++ configuration/configuration_test.go | 14 +++--- docs/configuration.md | 3 ++ registry/registry.go | 29 +++++++++++- registry/registry_test.go | 70 +++++++++++++++++++++++++++++ 5 files changed, 113 insertions(+), 7 deletions(-) diff --git a/configuration/configuration.go b/configuration/configuration.go index 621d29ae..c87c9a30 100644 --- a/configuration/configuration.go +++ b/configuration/configuration.go @@ -84,6 +84,10 @@ type Configuration struct { // Location headers RelativeURLs bool `yaml:"relativeurls,omitempty"` + // Amount of time to wait for connection to drain before shutting down when registry + // receives a stop signal + DrainTimeout time.Duration `yaml:"draintimeout,omitempty"` + // TLS instructs the http server to listen with a TLS configuration. // This only support simple tls configuration with a cert and key. // Mostly, this is useful for testing situations or simple deployments diff --git a/configuration/configuration_test.go b/configuration/configuration_test.go index e74b92c4..81646190 100644 --- a/configuration/configuration_test.go +++ b/configuration/configuration_test.go @@ -7,6 +7,7 @@ import ( "reflect" "strings" "testing" + "time" . "gopkg.in/check.v1" "gopkg.in/yaml.v2" @@ -71,12 +72,13 @@ var configStruct = Configuration{ }, }, HTTP: struct { - Addr string `yaml:"addr,omitempty"` - Net string `yaml:"net,omitempty"` - Host string `yaml:"host,omitempty"` - Prefix string `yaml:"prefix,omitempty"` - Secret string `yaml:"secret,omitempty"` - RelativeURLs bool `yaml:"relativeurls,omitempty"` + Addr string `yaml:"addr,omitempty"` + Net string `yaml:"net,omitempty"` + Host string `yaml:"host,omitempty"` + Prefix string `yaml:"prefix,omitempty"` + Secret string `yaml:"secret,omitempty"` + RelativeURLs bool `yaml:"relativeurls,omitempty"` + DrainTimeout time.Duration `yaml:"draintimeout,omitempty"` TLS struct { Certificate string `yaml:"certificate,omitempty"` Key string `yaml:"key,omitempty"` diff --git a/docs/configuration.md b/docs/configuration.md index f2e83513..6fa0cf8e 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -206,6 +206,7 @@ http: host: https://myregistryaddress.org:5000 secret: asecretforlocaldevelopment relativeurls: false + draintimeout: 60s tls: certificate: /path/to/x509/public key: /path/to/x509/private @@ -738,6 +739,7 @@ http: host: https://myregistryaddress.org:5000 secret: asecretforlocaldevelopment relativeurls: false + draintimeout: 60s tls: certificate: /path/to/x509/public key: /path/to/x509/private @@ -767,6 +769,7 @@ registry. | `host` | no | A fully-qualified URL for an externally-reachable address for the registry. If present, it is used when creating generated URLs. Otherwise, these URLs are derived from client requests. | | `secret` | no | A random piece of data used to sign state that may be stored with the client to protect against tampering. For production environments you should generate a random piece of data using a cryptographically secure random generator. If you omit the secret, the registry will automatically generate a secret when it starts. **If you are building a cluster of registries behind a load balancer, you MUST ensure the secret is the same for all registries.**| | `relativeurls`| no | If `true`, the registry returns relative URLs in Location headers. The client is responsible for resolving the correct URL. 
**This option is not compatible with Docker 1.7 and earlier.**| +| `draintimeout`| no | Amount of time to wait for HTTP connections to drain before shutting after registry receives SIGTERM signal| ### `tls` diff --git a/registry/registry.go b/registry/registry.go index 0737f7d0..9580374c 100644 --- a/registry/registry.go +++ b/registry/registry.go @@ -8,6 +8,8 @@ import ( "io/ioutil" "net/http" "os" + "os/signal" + "syscall" "time" "rsc.io/letsencrypt" @@ -28,6 +30,9 @@ import ( "github.com/yvasiyarov/gorelic" ) +// this channel gets notified when process receives signal. It is global to ease unit testing +var quit = make(chan os.Signal, 1) + // ServeCmd is a cobra command for running the registry. var ServeCmd = &cobra.Command{ Use: "serve ", @@ -195,7 +200,29 @@ func (registry *Registry) ListenAndServe() error { dcontext.GetLogger(registry.app).Infof("listening on %v", ln.Addr()) } - return registry.server.Serve(ln) + if config.HTTP.DrainTimeout == 0 { + return registry.server.Serve(ln) + } + + // setup channel to get notified on SIGTERM signal + signal.Notify(quit, syscall.SIGTERM) + serveErr := make(chan error) + + // Start serving in goroutine and listen for stop signal in main thread + go func() { + serveErr <- registry.server.Serve(ln) + }() + + select { + case err := <-serveErr: + return err + case <-quit: + dcontext.GetLogger(registry.app).Info("stopping server gracefully. Draining connections for ", config.HTTP.DrainTimeout) + // shutdown the server with a grace period of configured timeout + c, cancel := context.WithTimeout(context.Background(), config.HTTP.DrainTimeout) + defer cancel() + return registry.server.Shutdown(c) + } } func configureReporting(app *handlers.App) http.Handler { diff --git a/registry/registry_test.go b/registry/registry_test.go index 34673117..d8deb35e 100644 --- a/registry/registry_test.go +++ b/registry/registry_test.go @@ -1,10 +1,19 @@ package registry import ( + "bufio" + "context" + "fmt" + "io/ioutil" + "net" + "net/http" + "os" "reflect" "testing" + "time" "github.com/docker/distribution/configuration" + _ "github.com/docker/distribution/registry/storage/driver/inmemory" ) // Tests to ensure nextProtos returns the correct protocols when: @@ -28,3 +37,64 @@ func TestNextProtos(t *testing.T) { t.Fatalf("expected protos to equal [http/1.1], got %s", protos) } } + +func setupRegistry() (*Registry, error) { + config := &configuration.Configuration{} + // TODO: this needs to change to something ephemeral as the test will fail if there is any server + // already listening on port 5000 + config.HTTP.Addr = ":5000" + config.HTTP.DrainTimeout = time.Duration(10) * time.Second + config.Storage = map[string]configuration.Parameters{"inmemory": map[string]interface{}{}} + return NewRegistry(context.Background(), config) +} + +func TestGracefulShutdown(t *testing.T) { + registry, err := setupRegistry() + if err != nil { + t.Fatal(err) + } + + // run registry server + var errchan chan error + go func() { + errchan <- registry.ListenAndServe() + }() + select { + case err = <-errchan: + t.Fatalf("Error listening: %v", err) + default: + } + + // Wait for some unknown random time for server to start listening + time.Sleep(3 * time.Second) + + // send incomplete request + conn, err := net.Dial("tcp", "localhost:5000") + if err != nil { + t.Fatal(err) + } + fmt.Fprintf(conn, "GET /v2/ ") + + // send stop signal + quit <- os.Interrupt + time.Sleep(100 * time.Millisecond) + + // try connecting again. 
it shouldn't + _, err = net.Dial("tcp", "localhost:5000") + if err == nil { + t.Fatal("Managed to connect after stopping.") + } + + // make sure earlier request is not disconnected and response can be received + fmt.Fprintf(conn, "HTTP/1.1\r\nHost: 127.0.0.1\r\n\r\n") + resp, err := http.ReadResponse(bufio.NewReader(conn), nil) + if err != nil { + t.Fatal(err) + } + if resp.Status != "200 OK" { + t.Error("response status is not 200 OK: ", resp.Status) + } + if body, err := ioutil.ReadAll(resp.Body); err != nil || string(body) != "{}" { + t.Error("Body is not {}; ", string(body)) + } +} From 5e4b81a578256648d897c5bf6ea9b84665763e98 Mon Sep 17 00:00:00 2001 From: Andrew Leung Date: Mon, 20 Aug 2018 10:01:40 -0700 Subject: [PATCH 142/276] Use references terminology instead of layers. Signed-off-by: Andrew Leung --- cmd/registry/config-cache.yml | 2 +- cmd/registry/config-dev.yml | 2 +- configuration/configuration.go | 2 +- docs/configuration.md | 6 +++--- notifications/bridge.go | 30 +++++++++++++++--------------- notifications/bridge_test.go | 10 +++++----- notifications/event.go | 4 ++-- registry/handlers/app.go | 2 +- 8 files changed, 29 insertions(+), 29 deletions(-) diff --git a/cmd/registry/config-cache.yml b/cmd/registry/config-cache.yml index f7e47b82..d648303d 100644 --- a/cmd/registry/config-cache.yml +++ b/cmd/registry/config-cache.yml @@ -30,7 +30,7 @@ redis: writetimeout: 10ms notifications: events: - manifestlayers: true + includereferences: true endpoints: - name: local-8082 url: http://localhost:5003/callback diff --git a/cmd/registry/config-dev.yml b/cmd/registry/config-dev.yml index 25002569..9539bae4 100644 --- a/cmd/registry/config-dev.yml +++ b/cmd/registry/config-dev.yml @@ -48,7 +48,7 @@ redis: writetimeout: 10ms notifications: events: - manifestlayers: true + includereferences: true endpoints: - name: local-5003 url: http://localhost:5003/callback diff --git a/configuration/configuration.go b/configuration/configuration.go index 59841382..0b5b46aa 100644 --- a/configuration/configuration.go +++ b/configuration/configuration.go @@ -568,7 +568,7 @@ type Endpoint struct { // Events configures notification events. type Events struct { - ManifestLayers bool `yaml:"manifestlayers"` // include layer data in manifest events + IncludeReferences bool `yaml:"includereferences"` // include reference data in manifest events } //Ignore configures mediaTypes and actions of the event, that it won't be propagated diff --git a/docs/configuration.md b/docs/configuration.md index 7a6b9b2c..3f346439 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -227,7 +227,7 @@ http: disabled: false notifications: events: - manifestlayers: true + includereferences: true endpoints: - name: alistener disabled: false @@ -856,7 +856,7 @@ settings for the registry. ```none notifications: events: - manifestlayers: true + includereferences: true endpoints: - name: alistener disabled: false @@ -906,7 +906,7 @@ The `events` structure configures the information provided in event notification | Parameter | Required | Description | |-----------|----------|-------------------------------------------------------| -| `manifestlayers` | no | If `true`, include layer information in manifest events. | +| `includereferences` | no | If `true`, include reference information in manifest events. 
| ## `redis` diff --git a/notifications/bridge.go b/notifications/bridge.go index 1a58c2bb..efc73f68 100644 --- a/notifications/bridge.go +++ b/notifications/bridge.go @@ -12,12 +12,12 @@ import ( ) type bridge struct { - ub URLBuilder - manifestLayers bool - actor ActorRecord - source SourceRecord - request RequestRecord - sink Sink + ub URLBuilder + includeReferences bool + actor ActorRecord + source SourceRecord + request RequestRecord + sink Sink } var _ Listener = &bridge{} @@ -32,14 +32,14 @@ type URLBuilder interface { // using the actor and source. Any urls populated in the events created by // this bridge will be created using the URLBuilder. // TODO(stevvooe): Update this to simply take a context.Context object. -func NewBridge(ub URLBuilder, source SourceRecord, actor ActorRecord, request RequestRecord, sink Sink, manifestLayers bool) Listener { +func NewBridge(ub URLBuilder, source SourceRecord, actor ActorRecord, request RequestRecord, sink Sink, includeReferences bool) Listener { return &bridge{ - ub: ub, - manifestLayers: manifestLayers, - actor: actor, - source: source, - request: request, - sink: sink, + ub: ub, + includeReferences: includeReferences, + actor: actor, + source: source, + request: request, + sink: sink, } } @@ -146,8 +146,8 @@ func (b *bridge) createManifestEvent(action string, repo reference.Named, sm dis event.Target.Length = desc.Size event.Target.Size = desc.Size event.Target.Digest = desc.Digest - if b.manifestLayers { - event.Target.Layers = append(event.Target.Layers, manifest.References()...) + if b.includeReferences { + event.Target.References = append(event.Target.References, manifest.References()...) } ref, err := reference.WithDigest(repo, event.Target.Digest) diff --git a/notifications/bridge_test.go b/notifications/bridge_test.go index cdf500c3..87c868a5 100644 --- a/notifications/bridge_test.go +++ b/notifications/bridge_test.go @@ -180,12 +180,12 @@ func checkCommonManifest(t *testing.T, action string, events ...Event) { t.Fatalf("incorrect url passed: \n%q != \n%q", event.Target.URL, u) } - if len(event.Target.Layers) != len(layers) { - t.Fatalf("unexpected number of layers %v != %v", len(event.Target.Layers), len(layers)) + if len(event.Target.References) != len(layers) { + t.Fatalf("unexpected number of references %v != %v", len(event.Target.References), len(layers)) } - for i, layer := range event.Target.Layers { - if layer.Digest != layers[i].BlobSum { - t.Fatalf("unexpected layer: %q != %q", layer.Digest, layers[i].BlobSum) + for i, targetReference := range event.Target.References { + if targetReference.Digest != layers[i].BlobSum { + t.Fatalf("unexpected reference: %q != %q", targetReference.Digest, layers[i].BlobSum) } } } diff --git a/notifications/event.go b/notifications/event.go index ab74c675..54940a46 100644 --- a/notifications/event.go +++ b/notifications/event.go @@ -72,8 +72,8 @@ type Event struct { // Tag provides the tag Tag string `json:"tag,omitempty"` - // Layers provides the layers descriptors. - Layers []distribution.Descriptor `json:"layers,omitempty"` + // References provides the references descriptors. + References []distribution.Descriptor `json:"references,omitempty"` } `json:"target,omitempty"` // Request covers the request that generated the event. 
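For endpoint consumers, the visible effect is that manifest events now carry a `references` array where `layers` used to be. A hedged sketch of a listener reading the renamed field; the struct is a trimmed-down mirror of the JSON tags above, and the `/callback` route on port 5003 follows the sample configs in this patch:

```go
package main

import (
	"encoding/json"
	"log"
	"net/http"
)

// envelope is a trimmed-down mirror of the notification payload;
// only the fields this example reads are declared.
type envelope struct {
	Events []struct {
		Action string `json:"action"`
		Target struct {
			Digest     string `json:"digest"`
			References []struct {
				MediaType string `json:"mediaType"`
				Digest    string `json:"digest"`
			} `json:"references"` // previously "layers"
		} `json:"target"`
	} `json:"events"`
}

func callback(w http.ResponseWriter, r *http.Request) {
	var env envelope
	if err := json.NewDecoder(r.Body).Decode(&env); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	for _, ev := range env.Events {
		for _, ref := range ev.Target.References {
			log.Printf("%s %s -> reference %s (%s)",
				ev.Action, ev.Target.Digest, ref.Digest, ref.MediaType)
		}
	}
	w.WriteHeader(http.StatusOK)
}

func main() {
	http.HandleFunc("/callback", callback)
	log.Fatal(http.ListenAndServe(":5003", nil))
}
```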
diff --git a/registry/handlers/app.go b/registry/handlers/app.go index 8ccfffe5..8c1f18e6 100644 --- a/registry/handlers/app.go +++ b/registry/handlers/app.go @@ -869,7 +869,7 @@ func (app *App) eventBridge(ctx *Context, r *http.Request) notifications.Listene } request := notifications.NewRequestRecord(dcontext.GetRequestID(ctx), r) - return notifications.NewBridge(ctx.urlBuilder, app.events.source, actor, request, app.events.sink, app.Config.Notifications.EventConfig.ManifestLayers) + return notifications.NewBridge(ctx.urlBuilder, app.events.source, actor, request, app.events.sink, app.Config.Notifications.EventConfig.IncludeReferences) } // nameRequired returns true if the route requires a name. From 0b0d470281967fddab5af0ee5add93298518b9a1 Mon Sep 17 00:00:00 2001 From: David Wu Date: Mon, 20 Aug 2018 08:22:09 -0700 Subject: [PATCH 143/276] use aws sdk to validate regions Signed-off-by: David Wu --- registry/storage/driver/s3-aws/s3.go | 25 ++++++------------------- 1 file changed, 6 insertions(+), 19 deletions(-) diff --git a/registry/storage/driver/s3-aws/s3.go b/registry/storage/driver/s3-aws/s3.go index 34f82245..9cd87dba 100644 --- a/registry/storage/driver/s3-aws/s3.go +++ b/registry/storage/driver/s3-aws/s3.go @@ -31,6 +31,7 @@ import ( "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" @@ -105,25 +106,11 @@ type DriverParameters struct { } func init() { - for _, region := range []string{ - "us-east-1", - "us-east-2", - "us-west-1", - "us-west-2", - "eu-west-1", - "eu-west-2", - "eu-central-1", - "ap-south-1", - "ap-southeast-1", - "ap-southeast-2", - "ap-northeast-1", - "ap-northeast-2", - "sa-east-1", - "cn-north-1", - "us-gov-west-1", - "ca-central-1", - } { - validRegions[region] = struct{}{} + partitions := endpoints.DefaultPartitions() + for _, p := range partitions { + for region := range p.Regions() { + validRegions[region] = struct{}{} + } } for _, objectACL := range []string{ From 0a302c7fa99709525a4c57d4e91b568b1dee366a Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Mon, 20 Aug 2018 11:47:54 -0700 Subject: [PATCH 144/276] Remove Aaron Lehmann from maintainers file Signed-off-by: Derek McGowan --- MAINTAINERS | 6 ------ 1 file changed, 6 deletions(-) diff --git a/MAINTAINERS b/MAINTAINERS index bf0d8eb0..5c316238 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -187,7 +187,6 @@ distribution defers to the [Technical Steering Committee](https://github.com/mob [Org.Maintainers] people = [ - "aaronlehmann", "dmcgowan", "dmp42", "stevvooe", @@ -203,11 +202,6 @@ distribution defers to the [Technical Steering Committee](https://github.com/mob # ADD YOURSELF HERE IN ALPHABETICAL ORDER - [people.aaronlehmann] - Name = "Aaron Lehmann" - Email = "aaron.lehmann@docker.com" - GitHub = "aaronlehmann" - [people.dmcgowan] Name = "Derek McGowan" Email = "derek@mcgstyle.net" From 610440eb19d980fb46fc23abd1066f321665f088 Mon Sep 17 00:00:00 2001 From: Olivier Date: Mon, 20 Aug 2018 11:15:14 -0700 Subject: [PATCH 145/276] Add new reviewers Signed-off-by: Olivier --- MAINTAINERS | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index 5c316238..3183620c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -192,7 +192,12 @@ distribution defers to the [Technical Steering 
Committee](https://github.com/mob "stevvooe", ] [Org.Reviewers] - people = [] + people = [ + "manishtomar", + "caervs", + "davidswu", + "RobbKistler" + ] [people] @@ -202,6 +207,16 @@ distribution defers to the [Technical Steering Committee](https://github.com/mob # ADD YOURSELF HERE IN ALPHABETICAL ORDER + [people.caervs] + Name = "Ryan Abrams" + Email = "rdabrams@gmail.com" + GitHub = "caervs" + + [people.davidswu] + Name = "David Wu" + Email = "dwu7401@gmail.com" + GitHub = "davidswu" + [people.dmcgowan] Name = "Derek McGowan" Email = "derek@mcgstyle.net" @@ -212,6 +227,16 @@ distribution defers to the [Technical Steering Committee](https://github.com/mob Email = "olivier@docker.com" GitHub = "dmp42" + [people.manishtomar] + Name = "Manish Tomar" + Email = "manish.tomar@docker.com" + GitHub = "manishtomar" + + [people.RobbKistler] + Name = "Robb Kistler" + Email = "robb.kistler@docker.com" + GitHub = "RobbKistler" + [people.stevvooe] Name = "Stephen Day" Email = "stephen.day@docker.com" From 0101db11effc4f8bfe3a3fb484924138177b91ef Mon Sep 17 00:00:00 2001 From: Ryan Abrams Date: Thu, 23 Aug 2018 16:04:53 -0700 Subject: [PATCH 146/276] Replace tab with space in nginx config Quick follow-up to #9223. This gives better consistency and readability to the file. Signed-off-by: Ryan Abrams --- contrib/compose/nginx/docker-registry-v2.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/compose/nginx/docker-registry-v2.conf b/contrib/compose/nginx/docker-registry-v2.conf index df698c79..2fd2ccde 100644 --- a/contrib/compose/nginx/docker-registry-v2.conf +++ b/contrib/compose/nginx/docker-registry-v2.conf @@ -4,6 +4,6 @@ proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Proto $scheme; proxy_read_timeout 900; -proxy_send_timeout 300; +proxy_send_timeout 300; proxy_request_buffering off; (see issue #2292 - https://github.com/moby/moby/issues/2292) proxy_http_version 1.1; From f95ac7db955a57c3740fb7b6b9c73164f0a4c5f7 Mon Sep 17 00:00:00 2001 From: Manish Tomar Date: Fri, 24 Aug 2018 14:55:36 -0700 Subject: [PATCH 147/276] fix doc - thanks @dmp42 Signed-off-by: Manish Tomar --- docs/configuration.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/configuration.md b/docs/configuration.md index 6fa0cf8e..0a3642ef 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -769,7 +769,7 @@ registry. | `host` | no | A fully-qualified URL for an externally-reachable address for the registry. If present, it is used when creating generated URLs. Otherwise, these URLs are derived from client requests. | | `secret` | no | A random piece of data used to sign state that may be stored with the client to protect against tampering. For production environments you should generate a random piece of data using a cryptographically secure random generator. If you omit the secret, the registry will automatically generate a secret when it starts. **If you are building a cluster of registries behind a load balancer, you MUST ensure the secret is the same for all registries.**| | `relativeurls`| no | If `true`, the registry returns relative URLs in Location headers. The client is responsible for resolving the correct URL. 
**This option is not compatible with Docker 1.7 and earlier.**| -| `draintimeout`| no | Amount of time to wait for HTTP connections to drain before shutting after registry receives SIGTERM signal| +| `draintimeout`| no | Amount of time to wait for HTTP connections to drain before shutting down after registry receives SIGTERM signal| ### `tls` From 8d7e4cd38828ebac1e19ad3065e2434785a26c8f Mon Sep 17 00:00:00 2001 From: David Wu Date: Tue, 4 Sep 2018 17:35:16 -0700 Subject: [PATCH 148/276] fix goimports and gofmt Signed-off-by: David Wu --- registry/auth/htpasswd/access.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/registry/auth/htpasswd/access.go b/registry/auth/htpasswd/access.go index 034c503a..eddf7ac3 100644 --- a/registry/auth/htpasswd/access.go +++ b/registry/auth/htpasswd/access.go @@ -10,13 +10,14 @@ import ( "crypto/rand" "encoding/base64" "fmt" - "golang.org/x/crypto/bcrypt" "net/http" "os" "path/filepath" "sync" "time" + "golang.org/x/crypto/bcrypt" + dcontext "github.com/docker/distribution/context" "github.com/docker/distribution/registry/auth" ) From 5a74b806f092ad2877caebee42e19bf495230ed1 Mon Sep 17 00:00:00 2001 From: mlmhl <409107750@qq.com> Date: Mon, 4 Dec 2017 11:31:10 +0800 Subject: [PATCH 149/276] update github.com/ncw/swift package in vendor to avoid potential memory leaks Signed-off-by: mlmhl <409107750@qq.com> --- vendor.conf | 2 +- vendor/github.com/ncw/swift/README.md | 2 + vendor/github.com/ncw/swift/auth_v3.go | 3 +- vendor/github.com/ncw/swift/dlo.go | 136 +++++ vendor/github.com/ncw/swift/largeobjects.go | 448 +++++++++++++++++ vendor/github.com/ncw/swift/slo.go | 168 +++++++ vendor/github.com/ncw/swift/swift.go | 469 ++++++++++++++---- .../github.com/ncw/swift/swifttest/server.go | 274 ++++++++-- vendor/github.com/ncw/swift/timeout_reader.go | 4 +- .../github.com/ncw/swift/watchdog_reader.go | 43 +- 10 files changed, 1387 insertions(+), 162 deletions(-) create mode 100644 vendor/github.com/ncw/swift/dlo.go create mode 100644 vendor/github.com/ncw/swift/largeobjects.go create mode 100644 vendor/github.com/ncw/swift/slo.go diff --git a/vendor.conf b/vendor.conf index 31bb0342..bcb2d1f0 100644 --- a/vendor.conf +++ b/vendor.conf @@ -23,11 +23,11 @@ github.com/satori/go.uuid f58768cc1a7a7e77a3bd49e98cdd21419399b6a3 github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c github.com/miekg/dns 271c58e0c14f552178ea321a545ff9af38930f39 github.com/mitchellh/mapstructure 482a9fd5fa83e8c4e7817413b80f3eb8feec03ef -github.com/ncw/swift b964f2ca856aac39885e258ad25aec08d5f64ee6 github.com/prometheus/client_golang c332b6f63c0658a65eca15c0e5247ded801cf564 github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c github.com/prometheus/common 89604d197083d4781071d3c65855d24ecfb0a563 github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd +github.com/ncw/swift c95c6e5c2d1a3d37fc44c8c6dc9e231c7500667d github.com/spf13/cobra 312092086bed4968099259622145a0c9ae280064 github.com/spf13/pflag 5644820622454e71517561946e3d94b9f9db6842 github.com/stevvooe/resumable 2aaf90b2ceea5072cb503ef2a620b08ff3119870 diff --git a/vendor/github.com/ncw/swift/README.md b/vendor/github.com/ncw/swift/README.md index 2cc24cff..5094a467 100644 --- a/vendor/github.com/ncw/swift/README.md +++ b/vendor/github.com/ncw/swift/README.md @@ -138,3 +138,5 @@ Contributors - Cezar Sa Espinola - Sam Gunaratne - Richard Scothern +- Michel Couillard +- Christopher Waldon diff --git a/vendor/github.com/ncw/swift/auth_v3.go 
b/vendor/github.com/ncw/swift/auth_v3.go index 21e96718..64dcabc9 100644 --- a/vendor/github.com/ncw/swift/auth_v3.go +++ b/vendor/github.com/ncw/swift/auth_v3.go @@ -117,7 +117,7 @@ func (auth *v3Auth) Request(c *Connection) (*http.Request, error) { v3 := v3AuthRequest{} - if c.UserName == "" { + if c.UserName == "" && c.UserId == "" { v3.Auth.Identity.Methods = []string{v3AuthMethodToken} v3.Auth.Identity.Token = &v3AuthToken{Id: c.ApiKey} } else { @@ -125,6 +125,7 @@ func (auth *v3Auth) Request(c *Connection) (*http.Request, error) { v3.Auth.Identity.Password = &v3AuthPassword{ User: v3User{ Name: c.UserName, + Id: c.UserId, Password: c.ApiKey, }, } diff --git a/vendor/github.com/ncw/swift/dlo.go b/vendor/github.com/ncw/swift/dlo.go new file mode 100644 index 00000000..e2e2aa97 --- /dev/null +++ b/vendor/github.com/ncw/swift/dlo.go @@ -0,0 +1,136 @@ +package swift + +import ( + "os" +) + +// DynamicLargeObjectCreateFile represents an open static large object +type DynamicLargeObjectCreateFile struct { + largeObjectCreateFile +} + +// DynamicLargeObjectCreateFile creates a dynamic large object +// returning an object which satisfies io.Writer, io.Seeker, io.Closer +// and io.ReaderFrom. The flags are as passes to the +// largeObjectCreate method. +func (c *Connection) DynamicLargeObjectCreateFile(opts *LargeObjectOpts) (LargeObjectFile, error) { + lo, err := c.largeObjectCreate(opts) + if err != nil { + return nil, err + } + + return withBuffer(opts, &DynamicLargeObjectCreateFile{ + largeObjectCreateFile: *lo, + }), nil +} + +// DynamicLargeObjectCreate creates or truncates an existing dynamic +// large object returning a writeable object. This sets opts.Flags to +// an appropriate value before calling DynamicLargeObjectCreateFile +func (c *Connection) DynamicLargeObjectCreate(opts *LargeObjectOpts) (LargeObjectFile, error) { + opts.Flags = os.O_TRUNC | os.O_CREATE + return c.DynamicLargeObjectCreateFile(opts) +} + +// DynamicLargeObjectDelete deletes a dynamic large object and all of its segments. 
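Composed together, the new entry points make DLO uploads look like ordinary file writes. A minimal, hedged sketch against the vendored package; credentials and object names are placeholders:

```go
package main

import (
	"log"

	"github.com/ncw/swift"
)

func main() {
	// Placeholder credentials; any supported auth version works.
	c := swift.Connection{
		UserName: "demo",
		ApiKey:   "secret",
		AuthUrl:  "https://auth.example.com/v3",
	}
	if err := c.Authenticate(); err != nil {
		log.Fatal(err)
	}

	// Open a dynamic large object for writing. Segments are uploaded
	// in ChunkSize pieces, by default into "<container>_segments".
	out, err := c.DynamicLargeObjectCreate(&swift.LargeObjectOpts{
		Container:  "media",
		ObjectName: "video.bin",
		ChunkSize:  10 * 1024 * 1024,
	})
	if err != nil {
		log.Fatal(err)
	}
	if _, err := out.Write([]byte("payload bytes...")); err != nil {
		log.Fatal(err)
	}
	// Close flushes buffered data and publishes the DLO manifest.
	if err := out.Close(); err != nil {
		log.Fatal(err)
	}
}
```

`Close` is what actually publishes the object: per `Flush` above, it writes the `X-Object-Manifest` manifest and then waits for the segments to show up.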
+func (c *Connection) DynamicLargeObjectDelete(container string, path string) error { + return c.LargeObjectDelete(container, path) +} + +// DynamicLargeObjectMove moves a dynamic large object from srcContainer, srcObjectName to dstContainer, dstObjectName +func (c *Connection) DynamicLargeObjectMove(srcContainer string, srcObjectName string, dstContainer string, dstObjectName string) error { + info, headers, err := c.Object(dstContainer, srcObjectName) + if err != nil { + return err + } + + segmentContainer, segmentPath := parseFullPath(headers["X-Object-Manifest"]) + if err := c.createDLOManifest(dstContainer, dstObjectName, segmentContainer+"/"+segmentPath, info.ContentType); err != nil { + return err + } + + if err := c.ObjectDelete(srcContainer, srcObjectName); err != nil { + return err + } + + return nil +} + +// createDLOManifest creates a dynamic large object manifest +func (c *Connection) createDLOManifest(container string, objectName string, prefix string, contentType string) error { + headers := make(Headers) + headers["X-Object-Manifest"] = prefix + manifest, err := c.ObjectCreate(container, objectName, false, "", contentType, headers) + if err != nil { + return err + } + + if err := manifest.Close(); err != nil { + return err + } + + return nil +} + +// Close satisfies the io.Closer interface +func (file *DynamicLargeObjectCreateFile) Close() error { + return file.Flush() +} + +func (file *DynamicLargeObjectCreateFile) Flush() error { + err := file.conn.createDLOManifest(file.container, file.objectName, file.segmentContainer+"/"+file.prefix, file.contentType) + if err != nil { + return err + } + return file.conn.waitForSegmentsToShowUp(file.container, file.objectName, file.Size()) +} + +func (c *Connection) getAllDLOSegments(segmentContainer, segmentPath string) ([]Object, error) { + //a simple container listing works 99.9% of the time + segments, err := c.ObjectsAll(segmentContainer, &ObjectsOpts{Prefix: segmentPath}) + if err != nil { + return nil, err + } + + hasObjectName := make(map[string]struct{}) + for _, segment := range segments { + hasObjectName[segment.Name] = struct{}{} + } + + //The container listing might be outdated (i.e. not contain all existing + //segment objects yet) because of temporary inconsistency (Swift is only + //eventually consistent!). Check its completeness. + segmentNumber := 0 + for { + segmentNumber++ + segmentName := getSegment(segmentPath, segmentNumber) + if _, seen := hasObjectName[segmentName]; seen { + continue + } + + //This segment is missing in the container listing. Use a more reliable + //request to check its existence. (HEAD requests on segments are + //guaranteed to return the correct metadata, except for the pathological + //case of an outage of large parts of the Swift cluster or its network, + //since every segment is only written once.) + segment, _, err := c.Object(segmentContainer, segmentName) + switch err { + case nil: + //found new segment -> add it in the correct position and keep + //going, more might be missing + if segmentNumber <= len(segments) { + segments = append(segments[:segmentNumber], segments[segmentNumber-1:]...) + segments[segmentNumber-1] = segment + } else { + segments = append(segments, segment) + } + continue + case ObjectNotFound: + //This segment is missing. Since we upload segments sequentially, + //there won't be any more segments after it. 
+ return segments, nil + default: + return nil, err //unexpected error + } + } +} diff --git a/vendor/github.com/ncw/swift/largeobjects.go b/vendor/github.com/ncw/swift/largeobjects.go new file mode 100644 index 00000000..bec640b0 --- /dev/null +++ b/vendor/github.com/ncw/swift/largeobjects.go @@ -0,0 +1,448 @@ +package swift + +import ( + "bufio" + "bytes" + "crypto/rand" + "crypto/sha1" + "encoding/hex" + "errors" + "fmt" + "io" + "os" + gopath "path" + "strconv" + "strings" + "time" +) + +// NotLargeObject is returned if an operation is performed on an object which isn't large. +var NotLargeObject = errors.New("Not a large object") + +// readAfterWriteTimeout defines the time we wait before an object appears after having been uploaded +var readAfterWriteTimeout = 15 * time.Second + +// readAfterWriteWait defines the time to sleep between two retries +var readAfterWriteWait = 200 * time.Millisecond + +// largeObjectCreateFile represents an open static or dynamic large object +type largeObjectCreateFile struct { + conn *Connection + container string + objectName string + currentLength int64 + filePos int64 + chunkSize int64 + segmentContainer string + prefix string + contentType string + checkHash bool + segments []Object + headers Headers + minChunkSize int64 +} + +func swiftSegmentPath(path string) (string, error) { + checksum := sha1.New() + random := make([]byte, 32) + if _, err := rand.Read(random); err != nil { + return "", err + } + path = hex.EncodeToString(checksum.Sum(append([]byte(path), random...))) + return strings.TrimLeft(strings.TrimRight("segments/"+path[0:3]+"/"+path[3:], "/"), "/"), nil +} + +func getSegment(segmentPath string, partNumber int) string { + return fmt.Sprintf("%s/%016d", segmentPath, partNumber) +} + +func parseFullPath(manifest string) (container string, prefix string) { + components := strings.SplitN(manifest, "/", 2) + container = components[0] + if len(components) > 1 { + prefix = components[1] + } + return container, prefix +} + +func (headers Headers) IsLargeObjectDLO() bool { + _, isDLO := headers["X-Object-Manifest"] + return isDLO +} + +func (headers Headers) IsLargeObjectSLO() bool { + _, isSLO := headers["X-Static-Large-Object"] + return isSLO +} + +func (headers Headers) IsLargeObject() bool { + return headers.IsLargeObjectSLO() || headers.IsLargeObjectDLO() +} + +func (c *Connection) getAllSegments(container string, path string, headers Headers) (string, []Object, error) { + if manifest, isDLO := headers["X-Object-Manifest"]; isDLO { + segmentContainer, segmentPath := parseFullPath(manifest) + segments, err := c.getAllDLOSegments(segmentContainer, segmentPath) + return segmentContainer, segments, err + } + if headers.IsLargeObjectSLO() { + return c.getAllSLOSegments(container, path) + } + return "", nil, NotLargeObject +} + +// LargeObjectOpts describes how a large object should be created +type LargeObjectOpts struct { + Container string // Name of container to place object + ObjectName string // Name of object + Flags int // Creation flags + CheckHash bool // If set Check the hash + Hash string // If set use this hash to check + ContentType string // Content-Type of the object + Headers Headers // Additional headers to upload the object with + ChunkSize int64 // Size of chunks of the object, defaults to 10MB if not set + MinChunkSize int64 // Minimum chunk size, automatically set for SLO's based on info + SegmentContainer string // Name of the container to place segments + SegmentPrefix string // Prefix to use for the segments + NoBuffer 
bool // Prevents using a bufio.Writer to write segments +} + +type LargeObjectFile interface { + io.Writer + io.Seeker + io.Closer + Size() int64 + Flush() error +} + +// largeObjectCreate creates a large object at opts.Container, opts.ObjectName. +// +// opts.Flags can have the following bits set +// os.TRUNC - remove the contents of the large object if it exists +// os.APPEND - write at the end of the large object +func (c *Connection) largeObjectCreate(opts *LargeObjectOpts) (*largeObjectCreateFile, error) { + var ( + segmentPath string + segmentContainer string + segments []Object + currentLength int64 + err error + ) + + if opts.SegmentPrefix != "" { + segmentPath = opts.SegmentPrefix + } else if segmentPath, err = swiftSegmentPath(opts.ObjectName); err != nil { + return nil, err + } + + if info, headers, err := c.Object(opts.Container, opts.ObjectName); err == nil { + if opts.Flags&os.O_TRUNC != 0 { + c.LargeObjectDelete(opts.Container, opts.ObjectName) + } else { + currentLength = info.Bytes + if headers.IsLargeObject() { + segmentContainer, segments, err = c.getAllSegments(opts.Container, opts.ObjectName, headers) + if err != nil { + return nil, err + } + if len(segments) > 0 { + segmentPath = gopath.Dir(segments[0].Name) + } + } else { + if err = c.ObjectMove(opts.Container, opts.ObjectName, opts.Container, getSegment(segmentPath, 1)); err != nil { + return nil, err + } + segments = append(segments, info) + } + } + } else if err != ObjectNotFound { + return nil, err + } + + // segmentContainer is not empty when the manifest already existed + if segmentContainer == "" { + if opts.SegmentContainer != "" { + segmentContainer = opts.SegmentContainer + } else { + segmentContainer = opts.Container + "_segments" + } + } + + file := &largeObjectCreateFile{ + conn: c, + checkHash: opts.CheckHash, + container: opts.Container, + objectName: opts.ObjectName, + chunkSize: opts.ChunkSize, + minChunkSize: opts.MinChunkSize, + headers: opts.Headers, + segmentContainer: segmentContainer, + prefix: segmentPath, + segments: segments, + currentLength: currentLength, + } + + if file.chunkSize == 0 { + file.chunkSize = 10 * 1024 * 1024 + } + + if file.minChunkSize > file.chunkSize { + file.chunkSize = file.minChunkSize + } + + if opts.Flags&os.O_APPEND != 0 { + file.filePos = currentLength + } + + return file, nil +} + +// LargeObjectDelete deletes the large object named by container, path +func (c *Connection) LargeObjectDelete(container string, objectName string) error { + _, headers, err := c.Object(container, objectName) + if err != nil { + return err + } + + var objects [][]string + if headers.IsLargeObject() { + segmentContainer, segments, err := c.getAllSegments(container, objectName, headers) + if err != nil { + return err + } + for _, obj := range segments { + objects = append(objects, []string{segmentContainer, obj.Name}) + } + } + objects = append(objects, []string{container, objectName}) + + info, err := c.cachedQueryInfo() + if err == nil && info.SupportsBulkDelete() && len(objects) > 0 { + filenames := make([]string, len(objects)) + for i, obj := range objects { + filenames[i] = obj[0] + "/" + obj[1] + } + _, err = c.doBulkDelete(filenames) + // Don't fail on ObjectNotFound because eventual consistency + // makes this situation normal. 
+ if err != nil && err != Forbidden && err != ObjectNotFound { + return err + } + } else { + for _, obj := range objects { + if err := c.ObjectDelete(obj[0], obj[1]); err != nil { + return err + } + } + } + + return nil +} + +// LargeObjectGetSegments returns all the segments that compose an object +// If the object is a Dynamic Large Object (DLO), it just returns the objects +// that have the prefix as indicated by the manifest. +// If the object is a Static Large Object (SLO), it retrieves the JSON content +// of the manifest and return all the segments of it. +func (c *Connection) LargeObjectGetSegments(container string, path string) (string, []Object, error) { + _, headers, err := c.Object(container, path) + if err != nil { + return "", nil, err + } + + return c.getAllSegments(container, path, headers) +} + +// Seek sets the offset for the next write operation +func (file *largeObjectCreateFile) Seek(offset int64, whence int) (int64, error) { + switch whence { + case 0: + file.filePos = offset + case 1: + file.filePos += offset + case 2: + file.filePos = file.currentLength + offset + default: + return -1, fmt.Errorf("invalid value for whence") + } + if file.filePos < 0 { + return -1, fmt.Errorf("negative offset") + } + return file.filePos, nil +} + +func (file *largeObjectCreateFile) Size() int64 { + return file.currentLength +} + +func withLORetry(expectedSize int64, fn func() (Headers, int64, error)) (err error) { + endTimer := time.NewTimer(readAfterWriteTimeout) + defer endTimer.Stop() + waitingTime := readAfterWriteWait + for { + var headers Headers + var sz int64 + if headers, sz, err = fn(); err == nil { + if !headers.IsLargeObjectDLO() || (expectedSize == 0 && sz > 0) || expectedSize == sz { + return + } + } else { + return + } + waitTimer := time.NewTimer(waitingTime) + select { + case <-endTimer.C: + waitTimer.Stop() + err = fmt.Errorf("Timeout expired while waiting for object to have size == %d, got: %d", expectedSize, sz) + return + case <-waitTimer.C: + waitingTime *= 2 + } + } +} + +func (c *Connection) waitForSegmentsToShowUp(container, objectName string, expectedSize int64) (err error) { + err = withLORetry(expectedSize, func() (Headers, int64, error) { + var info Object + var headers Headers + info, headers, err = c.objectBase(container, objectName) + if err != nil { + return headers, 0, err + } + return headers, info.Bytes, nil + }) + return +} + +// Write satisfies the io.Writer interface +func (file *largeObjectCreateFile) Write(buf []byte) (int, error) { + var sz int64 + var relativeFilePos int + writeSegmentIdx := 0 + for i, obj := range file.segments { + if file.filePos < sz+obj.Bytes || (i == len(file.segments)-1 && file.filePos < sz+file.minChunkSize) { + relativeFilePos = int(file.filePos - sz) + break + } + writeSegmentIdx++ + sz += obj.Bytes + } + sizeToWrite := len(buf) + for offset := 0; offset < sizeToWrite; { + newSegment, n, err := file.writeSegment(buf[offset:], writeSegmentIdx, relativeFilePos) + if err != nil { + return 0, err + } + if writeSegmentIdx < len(file.segments) { + file.segments[writeSegmentIdx] = *newSegment + } else { + file.segments = append(file.segments, *newSegment) + } + offset += n + writeSegmentIdx++ + relativeFilePos = 0 + } + file.filePos += int64(sizeToWrite) + file.currentLength = 0 + for _, obj := range file.segments { + file.currentLength += obj.Bytes + } + return sizeToWrite, nil +} + +func (file *largeObjectCreateFile) writeSegment(buf []byte, writeSegmentIdx int, relativeFilePos int) (*Object, int, error) { + var ( + 
readers []io.Reader + existingSegment *Object + segmentSize int + ) + segmentName := getSegment(file.prefix, writeSegmentIdx+1) + sizeToRead := int(file.chunkSize) + if writeSegmentIdx < len(file.segments) { + existingSegment = &file.segments[writeSegmentIdx] + if writeSegmentIdx != len(file.segments)-1 { + sizeToRead = int(existingSegment.Bytes) + } + if relativeFilePos > 0 { + headers := make(Headers) + headers["Range"] = "bytes=0-" + strconv.FormatInt(int64(relativeFilePos-1), 10) + existingSegmentReader, _, err := file.conn.ObjectOpen(file.segmentContainer, segmentName, true, headers) + if err != nil { + return nil, 0, err + } + defer existingSegmentReader.Close() + sizeToRead -= relativeFilePos + segmentSize += relativeFilePos + readers = []io.Reader{existingSegmentReader} + } + } + if sizeToRead > len(buf) { + sizeToRead = len(buf) + } + segmentSize += sizeToRead + readers = append(readers, bytes.NewReader(buf[:sizeToRead])) + if existingSegment != nil && segmentSize < int(existingSegment.Bytes) { + headers := make(Headers) + headers["Range"] = "bytes=" + strconv.FormatInt(int64(segmentSize), 10) + "-" + tailSegmentReader, _, err := file.conn.ObjectOpen(file.segmentContainer, segmentName, true, headers) + if err != nil { + return nil, 0, err + } + defer tailSegmentReader.Close() + segmentSize = int(existingSegment.Bytes) + readers = append(readers, tailSegmentReader) + } + segmentReader := io.MultiReader(readers...) + headers, err := file.conn.ObjectPut(file.segmentContainer, segmentName, segmentReader, true, "", file.contentType, nil) + if err != nil { + return nil, 0, err + } + return &Object{Name: segmentName, Bytes: int64(segmentSize), Hash: headers["Etag"]}, sizeToRead, nil +} + +func withBuffer(opts *LargeObjectOpts, lo LargeObjectFile) LargeObjectFile { + if !opts.NoBuffer { + return &bufferedLargeObjectFile{ + LargeObjectFile: lo, + bw: bufio.NewWriterSize(lo, int(opts.ChunkSize)), + } + } + return lo +} + +type bufferedLargeObjectFile struct { + LargeObjectFile + bw *bufio.Writer +} + +func (blo *bufferedLargeObjectFile) Close() error { + err := blo.bw.Flush() + if err != nil { + return err + } + return blo.LargeObjectFile.Close() +} + +func (blo *bufferedLargeObjectFile) Write(p []byte) (n int, err error) { + return blo.bw.Write(p) +} + +func (blo *bufferedLargeObjectFile) Seek(offset int64, whence int) (int64, error) { + err := blo.bw.Flush() + if err != nil { + return 0, err + } + return blo.LargeObjectFile.Seek(offset, whence) +} + +func (blo *bufferedLargeObjectFile) Size() int64 { + return blo.LargeObjectFile.Size() + int64(blo.bw.Buffered()) +} + +func (blo *bufferedLargeObjectFile) Flush() error { + err := blo.bw.Flush() + if err != nil { + return err + } + return blo.LargeObjectFile.Flush() +} diff --git a/vendor/github.com/ncw/swift/slo.go b/vendor/github.com/ncw/swift/slo.go new file mode 100644 index 00000000..4d94aa76 --- /dev/null +++ b/vendor/github.com/ncw/swift/slo.go @@ -0,0 +1,168 @@ +package swift + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/url" + "os" +) + +// StaticLargeObjectCreateFile represents an open static large object +type StaticLargeObjectCreateFile struct { + largeObjectCreateFile +} + +var SLONotSupported = errors.New("SLO not supported") + +type swiftSegment struct { + Path string `json:"path,omitempty"` + Etag string `json:"etag,omitempty"` + Size int64 `json:"size_bytes,omitempty"` + // When uploading a manifest, the attributes must be named `path`, `etag` and `size_bytes` + // but when querying the JSON 
content of a manifest with the `multipart-manifest=get` + // parameter, Swift names those attributes `name`, `hash` and `bytes`. + // We use all the different attributes names in this structure to be able to use + // the same structure for both uploading and retrieving. + Name string `json:"name,omitempty"` + Hash string `json:"hash,omitempty"` + Bytes int64 `json:"bytes,omitempty"` + ContentType string `json:"content_type,omitempty"` + LastModified string `json:"last_modified,omitempty"` +} + +// StaticLargeObjectCreateFile creates a static large object returning +// an object which satisfies io.Writer, io.Seeker, io.Closer and +// io.ReaderFrom. The flags are as passed to the largeObjectCreate +// method. +func (c *Connection) StaticLargeObjectCreateFile(opts *LargeObjectOpts) (LargeObjectFile, error) { + info, err := c.cachedQueryInfo() + if err != nil || !info.SupportsSLO() { + return nil, SLONotSupported + } + realMinChunkSize := info.SLOMinSegmentSize() + if realMinChunkSize > opts.MinChunkSize { + opts.MinChunkSize = realMinChunkSize + } + lo, err := c.largeObjectCreate(opts) + if err != nil { + return nil, err + } + return withBuffer(opts, &StaticLargeObjectCreateFile{ + largeObjectCreateFile: *lo, + }), nil +} + +// StaticLargeObjectCreate creates or truncates an existing static +// large object returning a writeable object. This sets opts.Flags to +// an appropriate value before calling StaticLargeObjectCreateFile +func (c *Connection) StaticLargeObjectCreate(opts *LargeObjectOpts) (LargeObjectFile, error) { + opts.Flags = os.O_TRUNC | os.O_CREATE + return c.StaticLargeObjectCreateFile(opts) +} + +// StaticLargeObjectDelete deletes a static large object and all of its segments. +func (c *Connection) StaticLargeObjectDelete(container string, path string) error { + info, err := c.cachedQueryInfo() + if err != nil || !info.SupportsSLO() { + return SLONotSupported + } + return c.LargeObjectDelete(container, path) +} + +// StaticLargeObjectMove moves a static large object from srcContainer, srcObjectName to dstContainer, dstObjectName +func (c *Connection) StaticLargeObjectMove(srcContainer string, srcObjectName string, dstContainer string, dstObjectName string) error { + swiftInfo, err := c.cachedQueryInfo() + if err != nil || !swiftInfo.SupportsSLO() { + return SLONotSupported + } + info, headers, err := c.Object(srcContainer, srcObjectName) + if err != nil { + return err + } + + container, segments, err := c.getAllSegments(srcContainer, srcObjectName, headers) + if err != nil { + return err + } + + if err := c.createSLOManifest(dstContainer, dstObjectName, info.ContentType, container, segments); err != nil { + return err + } + + if err := c.ObjectDelete(srcContainer, srcObjectName); err != nil { + return err + } + + return nil +} + +// createSLOManifest creates a static large object manifest +func (c *Connection) createSLOManifest(container string, path string, contentType string, segmentContainer string, segments []Object) error { + sloSegments := make([]swiftSegment, len(segments)) + for i, segment := range segments { + sloSegments[i].Path = fmt.Sprintf("%s/%s", segmentContainer, segment.Name) + sloSegments[i].Etag = segment.Hash + sloSegments[i].Size = segment.Bytes + } + + content, err := json.Marshal(sloSegments) + if err != nil { + return err + } + + values := url.Values{} + values.Set("multipart-manifest", "put") + if _, err := c.objectPut(container, path, bytes.NewBuffer(content), false, "", contentType, nil, values); err != nil { + return err + } + + return nil +} + +func 
(file *StaticLargeObjectCreateFile) Close() error { + return file.Flush() +} + +func (file *StaticLargeObjectCreateFile) Flush() error { + if err := file.conn.createSLOManifest(file.container, file.objectName, file.contentType, file.segmentContainer, file.segments); err != nil { + return err + } + return file.conn.waitForSegmentsToShowUp(file.container, file.objectName, file.Size()) +} + +func (c *Connection) getAllSLOSegments(container, path string) (string, []Object, error) { + var ( + segmentList []swiftSegment + segments []Object + segPath string + segmentContainer string + ) + + values := url.Values{} + values.Set("multipart-manifest", "get") + + file, _, err := c.objectOpen(container, path, true, nil, values) + if err != nil { + return "", nil, err + } + + content, err := ioutil.ReadAll(file) + if err != nil { + return "", nil, err + } + + json.Unmarshal(content, &segmentList) + for _, segment := range segmentList { + segmentContainer, segPath = parseFullPath(segment.Name[1:]) + segments = append(segments, Object{ + Name: segPath, + Bytes: segment.Bytes, + Hash: segment.Hash, + }) + } + + return segmentContainer, segments, nil +} diff --git a/vendor/github.com/ncw/swift/swift.go b/vendor/github.com/ncw/swift/swift.go index 4ba276b1..38e69653 100644 --- a/vendor/github.com/ncw/swift/swift.go +++ b/vendor/github.com/ncw/swift/swift.go @@ -11,9 +11,11 @@ import ( "fmt" "hash" "io" + "io/ioutil" "mime" "net/http" "net/url" + "os" "path" "strconv" "strings" @@ -33,6 +35,17 @@ const ( allObjectsChanLimit = 1000 // ...when fetching to a channel ) +// ObjectType is the type of the swift object, regular, static large, +// or dynamic large. +type ObjectType int + +// Values that ObjectType can take +const ( + RegularObjectType ObjectType = iota + StaticLargeObjectType + DynamicLargeObjectType +) + // Connection holds the details of the connection to the swift server. // // You need to provide UserName, ApiKey and AuthUrl when you create a @@ -86,6 +99,7 @@ type Connection struct { Domain string // User's domain name DomainId string // User's domain Id UserName string // UserName for api + UserId string // User Id ApiKey string // Key for api access AuthUrl string // Auth URL Retries int // Retries on error (default is 3) @@ -108,6 +122,139 @@ type Connection struct { client *http.Client Auth Authenticator `json:"-" xml:"-"` // the current authenticator authLock sync.Mutex // lock when R/W StorageUrl, AuthToken, Auth + // swiftInfo is filled after QueryInfo is called + swiftInfo SwiftInfo +} + +// setFromEnv reads the value that param points to (it must be a +// pointer), if it isn't the zero value then it reads the environment +// variable name passed in, parses it according to the type and writes +// it to the pointer. +func setFromEnv(param interface{}, name string) (err error) { + val := os.Getenv(name) + if val == "" { + return + } + switch result := param.(type) { + case *string: + if *result == "" { + *result = val + } + case *int: + if *result == 0 { + *result, err = strconv.Atoi(val) + } + case *bool: + if *result == false { + *result, err = strconv.ParseBool(val) + } + case *time.Duration: + if *result == 0 { + *result, err = time.ParseDuration(val) + } + case *EndpointType: + if *result == EndpointType("") { + *result = EndpointType(val) + } + default: + return newErrorf(0, "can't set var of type %T", param) + } + return err +} + +// ApplyEnvironment reads environment variables and applies them to +// the Connection structure. 
It won't overwrite any parameters which +// are already set in the Connection struct. +// +// To make a new Connection object entirely from the environment you +// would do: +// +// c := new(Connection) +// err := c.ApplyEnvironment() +// if err != nil { log.Fatal(err) } +// +// The naming of these variables follows the official Openstack naming +// scheme so it should be compatible with OpenStack rc files. +// +// For v1 authentication (obsolete) +// ST_AUTH - Auth URL +// ST_USER - UserName for api +// ST_KEY - Key for api access +// +// For v2 authentication +// OS_AUTH_URL - Auth URL +// OS_USERNAME - UserName for api +// OS_PASSWORD - Key for api access +// OS_TENANT_NAME - Name of the tenant +// OS_TENANT_ID - Id of the tenant +// OS_REGION_NAME - Region to use - default is use first region +// +// For v3 authentication +// OS_AUTH_URL - Auth URL +// OS_USERNAME - UserName for api +// OS_USER_ID - User Id +// OS_PASSWORD - Key for api access +// OS_USER_DOMAIN_NAME - User's domain name +// OS_USER_DOMAIN_ID - User's domain Id +// OS_PROJECT_NAME - Name of the project +// OS_PROJECT_DOMAIN_NAME - Name of the tenant's domain, only needed if it differs from the user domain +// OS_PROJECT_DOMAIN_ID - Id of the tenant's domain, only needed if it differs the from user domain +// OS_TRUST_ID - If of the trust +// OS_REGION_NAME - Region to use - default is use first region +// +// Other +// OS_ENDPOINT_TYPE - Endpoint type public, internal or admin +// ST_AUTH_VERSION - Choose auth version - 1, 2 or 3 or leave at 0 for autodetect +// +// For manual authentication +// OS_STORAGE_URL - storage URL from alternate authentication +// OS_AUTH_TOKEN - Auth Token from alternate authentication +// +// Library specific +// GOSWIFT_RETRIES - Retries on error (default is 3) +// GOSWIFT_USER_AGENT - HTTP User agent (default goswift/1.0) +// GOSWIFT_CONNECT_TIMEOUT - Connect channel timeout with unit, eg "10s", "100ms" (default "10s") +// GOSWIFT_TIMEOUT - Data channel timeout with unit, eg "10s", "100ms" (default "60s") +// GOSWIFT_INTERNAL - Set this to "true" to use the the internal network (obsolete - use OS_ENDPOINT_TYPE) +func (c *Connection) ApplyEnvironment() (err error) { + for _, item := range []struct { + result interface{} + name string + }{ + // Environment variables - keep in same order as Connection + {&c.Domain, "OS_USER_DOMAIN_NAME"}, + {&c.DomainId, "OS_USER_DOMAIN_ID"}, + {&c.UserName, "OS_USERNAME"}, + {&c.UserId, "OS_USER_ID"}, + {&c.ApiKey, "OS_PASSWORD"}, + {&c.AuthUrl, "OS_AUTH_URL"}, + {&c.Retries, "GOSWIFT_RETRIES"}, + {&c.UserAgent, "GOSWIFT_USER_AGENT"}, + {&c.ConnectTimeout, "GOSWIFT_CONNECT_TIMEOUT"}, + {&c.Timeout, "GOSWIFT_TIMEOUT"}, + {&c.Region, "OS_REGION_NAME"}, + {&c.AuthVersion, "ST_AUTH_VERSION"}, + {&c.Internal, "GOSWIFT_INTERNAL"}, + {&c.Tenant, "OS_TENANT_NAME"}, //v2 + {&c.Tenant, "OS_PROJECT_NAME"}, // v3 + {&c.TenantId, "OS_TENANT_ID"}, + {&c.EndpointType, "OS_ENDPOINT_TYPE"}, + {&c.TenantDomain, "OS_PROJECT_DOMAIN_NAME"}, + {&c.TenantDomainId, "OS_PROJECT_DOMAIN_ID"}, + {&c.TrustId, "OS_TRUST_ID"}, + {&c.StorageUrl, "OS_STORAGE_URL"}, + {&c.AuthToken, "OS_AUTH_TOKEN"}, + // v1 auth alternatives + {&c.ApiKey, "ST_KEY"}, + {&c.UserName, "ST_USER"}, + {&c.AuthUrl, "ST_AUTH"}, + } { + err = setFromEnv(item.result, item.name) + if err != nil { + return newErrorf(0, "failed to read env var %q: %v", item.name, err) + } + } + return nil } // Error - all errors generated by this package are of this type. 
Other error @@ -140,6 +287,7 @@ type errorMap map[int]error var ( // Specific Errors you might want to check for equality + NotModified = newError(304, "Not Modified") BadRequest = newError(400, "Bad Request") AuthorizationFailed = newError(401, "Authorization Failed") ContainerNotFound = newError(404, "Container Not Found") @@ -167,6 +315,7 @@ var ( // Mappings for object errors objectErrorMap = errorMap{ + 304: NotModified, 400: BadRequest, 403: Forbidden, 404: ObjectNotFound, @@ -184,15 +333,32 @@ func checkClose(c io.Closer, err *error) { } } +// drainAndClose discards all data from rd and closes it. +// If an error occurs during Read, it is discarded. +func drainAndClose(rd io.ReadCloser, err *error) { + if rd == nil { + return + } + + _, _ = io.Copy(ioutil.Discard, rd) + cerr := rd.Close() + if err != nil && *err == nil { + *err = cerr + } +} + // parseHeaders checks a response for errors and translates into -// standard errors if necessary. +// standard errors if necessary. If an error is returned, resp.Body +// has been drained and closed. func (c *Connection) parseHeaders(resp *http.Response, errorMap errorMap) error { if errorMap != nil { if err, ok := errorMap[resp.StatusCode]; ok { + drainAndClose(resp.Body, nil) return err } } if resp.StatusCode < 200 || resp.StatusCode > 299 { + drainAndClose(resp.Body, nil) return newErrorf(resp.StatusCode, "HTTP Error: %d: %s", resp.StatusCode, resp.Status) } return nil @@ -305,13 +471,14 @@ again: } if req != nil { timer := time.NewTimer(c.ConnectTimeout) + defer timer.Stop() var resp *http.Response resp, err = c.doTimeoutRequest(timer, req) if err != nil { return } defer func() { - checkClose(resp.Body, &err) + drainAndClose(resp.Body, &err) // Flush the auth connection - we don't want to keep // it open if keepalives were enabled flushKeepaliveConnections(c.Transport) @@ -406,6 +573,24 @@ func (c *Connection) authenticated() bool { // the enabled middlewares and their configuration type SwiftInfo map[string]interface{} +func (i SwiftInfo) SupportsBulkDelete() bool { + _, val := i["bulk_delete"] + return val +} + +func (i SwiftInfo) SupportsSLO() bool { + _, val := i["slo"] + return val +} + +func (i SwiftInfo) SLOMinSegmentSize() int64 { + if slo, ok := i["slo"].(map[string]interface{}); ok { + val, _ := slo["min_segment_size"].(float64) + return int64(val) + } + return 1 +} + // Discover Swift configuration by doing a request against /info func (c *Connection) QueryInfo() (infos SwiftInfo, err error) { infoUrl, err := url.Parse(c.StorageUrl) @@ -413,14 +598,36 @@ func (c *Connection) QueryInfo() (infos SwiftInfo, err error) { return nil, err } infoUrl.Path = path.Join(infoUrl.Path, "..", "..", "info") - resp, err := http.Get(infoUrl.String()) + resp, err := c.client.Get(infoUrl.String()) if err == nil { + if resp.StatusCode != http.StatusOK { + drainAndClose(resp.Body, nil) + return nil, fmt.Errorf("Invalid status code for info request: %d", resp.StatusCode) + } err = readJson(resp, &infos) + if err == nil { + c.authLock.Lock() + c.swiftInfo = infos + c.authLock.Unlock() + } return infos, err } return nil, err } +func (c *Connection) cachedQueryInfo() (infos SwiftInfo, err error) { + c.authLock.Lock() + infos = c.swiftInfo + c.authLock.Unlock() + if infos == nil { + infos, err = c.QueryInfo() + if err != nil { + return + } + } + return infos, nil +} + // RequestOpts contains parameters for Connection.storage. 
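As an aside, the `drainAndClose` helper introduced above reflects a general Go HTTP idiom: a keep-alive connection is only returned to the transport's idle pool once the response body has been read to EOF and closed. A minimal sketch of the same pattern outside this package (the function name and URL are illustrative, not part of go-swift):

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
)

// statusOf returns just the status code, draining the body so the
// underlying connection can be reused for the next request.
func statusOf(url string) (int, error) {
	resp, err := http.Get(url)
	if err != nil {
		return 0, err
	}
	defer func() {
		_, _ = io.Copy(ioutil.Discard, resp.Body) // discard leftover data
		_ = resp.Body.Close()
	}()
	return resp.StatusCode, nil
}

func main() {
	code, err := statusOf("https://example.com/")
	fmt.Println(code, err)
}
```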
type RequestOpts struct { Container string @@ -444,6 +651,7 @@ type RequestOpts struct { // Any other parameters (if not None) are added to the targetUrl // // Returns a response or an error. If response is returned then +// the resp.Body must be read completely and // resp.Body.Close() must be called on it, unless noResponse is set in // which case the body will be closed in this function // @@ -484,6 +692,7 @@ func (c *Connection) Call(targetUrl string, p RequestOpts) (resp *http.Response, URL.RawQuery = p.Parameters.Encode() } timer := time.NewTimer(c.ConnectTimeout) + defer timer.Stop() reader := p.Body if reader != nil { reader = newWatchdogReader(reader, c.Timeout, timer) @@ -518,7 +727,7 @@ func (c *Connection) Call(targetUrl string, p RequestOpts) (resp *http.Response, } // Check to see if token has expired if resp.StatusCode == 401 && retries > 0 { - _ = resp.Body.Close() + drainAndClose(resp.Body, nil) c.UnAuthenticate() retries-- } else { @@ -527,12 +736,12 @@ func (c *Connection) Call(targetUrl string, p RequestOpts) (resp *http.Response, } if err = c.parseHeaders(resp, p.ErrorMap); err != nil { - _ = resp.Body.Close() return nil, nil, err } headers = readHeaders(resp) if p.NoResponse { - err = resp.Body.Close() + var err error + drainAndClose(resp.Body, &err) if err != nil { return nil, nil, err } @@ -574,7 +783,7 @@ func (c *Connection) storage(p RequestOpts) (resp *http.Response, headers Header // // Closes the response when done func readLines(resp *http.Response) (lines []string, err error) { - defer checkClose(resp.Body, &err) + defer drainAndClose(resp.Body, &err) reader := bufio.NewReader(resp.Body) buffer := bytes.NewBuffer(make([]byte, 0, 128)) var part []byte @@ -599,7 +808,7 @@ func readLines(resp *http.Response) (lines []string, err error) { // // Closes the response when done func readJson(resp *http.Response, result interface{}) (err error) { - defer checkClose(resp.Body, &err) + defer drainAndClose(resp.Body, &err) decoder := json.NewDecoder(resp.Body) return decoder.Decode(result) } @@ -796,14 +1005,15 @@ func (c *Connection) ObjectNames(container string, opts *ObjectsOpts) ([]string, // Object contains information about an object type Object struct { - Name string `json:"name"` // object name - ContentType string `json:"content_type"` // eg application/directory - Bytes int64 `json:"bytes"` // size in bytes - ServerLastModified string `json:"last_modified"` // Last modified time, eg '2011-06-30T08:20:47.736680' as a string supplied by the server - LastModified time.Time // Last modified time converted to a time.Time - Hash string `json:"hash"` // MD5 hash, eg "d41d8cd98f00b204e9800998ecf8427e" - PseudoDirectory bool // Set when using delimiter to show that this directory object does not really exist - SubDir string `json:"subdir"` // returned only when using delimiter to mark "pseudo directories" + Name string `json:"name"` // object name + ContentType string `json:"content_type"` // eg application/directory + Bytes int64 `json:"bytes"` // size in bytes + ServerLastModified string `json:"last_modified"` // Last modified time, eg '2011-06-30T08:20:47.736680' as a string supplied by the server + LastModified time.Time // Last modified time converted to a time.Time + Hash string `json:"hash"` // MD5 hash, eg "d41d8cd98f00b204e9800998ecf8427e" + PseudoDirectory bool // Set when using delimiter to show that this directory object does not really exist + SubDir string `json:"subdir"` // returned only when using delimiter to mark "pseudo directories" + ObjectType 
ObjectType // type of this object } // Objects returns a slice of Object with information about each @@ -1141,6 +1351,19 @@ func (file *ObjectCreateFile) Close() error { return nil } +// Headers returns the response headers from the created object if the upload +// has been completed. The Close() method must be called on an ObjectCreateFile +// before this method. +func (file *ObjectCreateFile) Headers() (Headers, error) { + // error out if upload is not complete. + select { + case <-file.done: + default: + return nil, fmt.Errorf("Cannot get metadata, object upload failed or has not yet completed.") + } + return file.headers, nil +} + // Check it satisfies the interface var _ io.WriteCloser = &ObjectCreateFile{} @@ -1202,7 +1425,7 @@ func (c *Connection) ObjectCreate(container string, objectName string, checkHash } // Run the PUT in the background piping it data go func() { - file.resp, file.headers, file.err = c.storage(RequestOpts{ + opts := RequestOpts{ Container: container, ObjectName: objectName, Operation: "PUT", @@ -1210,7 +1433,8 @@ func (c *Connection) ObjectCreate(container string, objectName string, checkHash Body: pipeReader, NoResponse: true, ErrorMap: objectErrorMap, - }) + } + file.resp, file.headers, file.err = c.storage(opts) // Signal finished pipeReader.Close() close(file.done) @@ -1218,6 +1442,37 @@ func (c *Connection) ObjectCreate(container string, objectName string, checkHash return } +func (c *Connection) objectPut(container string, objectName string, contents io.Reader, checkHash bool, Hash string, contentType string, h Headers, parameters url.Values) (headers Headers, err error) { + extraHeaders := objectPutHeaders(objectName, &checkHash, Hash, contentType, h) + hash := md5.New() + var body io.Reader = contents + if checkHash { + body = io.TeeReader(contents, hash) + } + _, headers, err = c.storage(RequestOpts{ + Container: container, + ObjectName: objectName, + Operation: "PUT", + Headers: extraHeaders, + Body: body, + NoResponse: true, + ErrorMap: objectErrorMap, + Parameters: parameters, + }) + if err != nil { + return + } + if checkHash { + receivedMd5 := strings.ToLower(headers["Etag"]) + calculatedMd5 := fmt.Sprintf("%x", hash.Sum(nil)) + if receivedMd5 != calculatedMd5 { + err = ObjectCorrupted + return + } + } + return +} + // ObjectPut creates or updates the path in the container from // contents. contents should be an open io.Reader which will have all // its contents read. 
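As an aside, the new `objectPut` helper above centralizes the client-side integrity check: the request body is teed through an MD5 hash while it streams out, and the digest is compared with the `Etag` the server echoes back. The pattern in isolation, as a self-contained sketch (the "server" here is simulated with `ioutil.ReadAll`, not a real PUT):

```go
package main

import (
	"crypto/md5"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	contents := strings.NewReader("some object data")

	// Everything read from body is also written into hash.
	hash := md5.New()
	body := io.TeeReader(contents, hash)

	// Stand-in for the PUT: the server reads the body and returns
	// the MD5 of what it received as the Etag header.
	received, _ := ioutil.ReadAll(body)
	etag := fmt.Sprintf("%x", md5.Sum(received))

	calculated := fmt.Sprintf("%x", hash.Sum(nil))
	if etag != calculated {
		fmt.Println("object corrupted")
		return
	}
	fmt.Println("etag verified:", calculated)
}
```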
@@ -1240,33 +1495,7 @@ func (c *Connection) ObjectCreate(container string, objectName string, checkHash // If contentType is set it will be used, otherwise one will be // guessed from objectName using mime.TypeByExtension func (c *Connection) ObjectPut(container string, objectName string, contents io.Reader, checkHash bool, Hash string, contentType string, h Headers) (headers Headers, err error) { - extraHeaders := objectPutHeaders(objectName, &checkHash, Hash, contentType, h) - hash := md5.New() - var body io.Reader = contents - if checkHash { - body = io.TeeReader(contents, hash) - } - _, headers, err = c.storage(RequestOpts{ - Container: container, - ObjectName: objectName, - Operation: "PUT", - Headers: extraHeaders, - Body: body, - NoResponse: true, - ErrorMap: objectErrorMap, - }) - if err != nil { - return - } - if checkHash { - receivedMd5 := strings.ToLower(headers["Etag"]) - calculatedMd5 := fmt.Sprintf("%x", hash.Sum(nil)) - if receivedMd5 != calculatedMd5 { - err = ObjectCorrupted - return - } - } - return + return c.objectPut(container, objectName, contents, checkHash, Hash, contentType, h, nil) } // ObjectPutBytes creates an object from a []byte in a container. @@ -1274,7 +1503,8 @@ func (c *Connection) ObjectPut(container string, objectName string, contents io. // This is a simplified interface which checks the MD5. func (c *Connection) ObjectPutBytes(container string, objectName string, contents []byte, contentType string) (err error) { buf := bytes.NewBuffer(contents) - _, err = c.ObjectPut(container, objectName, buf, true, "", contentType, nil) + h := Headers{"Content-Length": strconv.Itoa(len(contents))} + _, err = c.ObjectPut(container, objectName, buf, true, "", contentType, h) return } @@ -1283,7 +1513,8 @@ func (c *Connection) ObjectPutBytes(container string, objectName string, content // This is a simplified interface which checks the MD5 func (c *Connection) ObjectPutString(container string, objectName string, contents string, contentType string) (err error) { buf := strings.NewReader(contents) - _, err = c.ObjectPut(container, objectName, buf, true, "", contentType, nil) + h := Headers{"Content-Length": strconv.Itoa(len(contents))} + _, err = c.ObjectPut(container, objectName, buf, true, "", contentType, h) return } @@ -1303,10 +1534,14 @@ type ObjectOpenFile struct { lengthOk bool // whether length is valid length int64 // length of the object if read seeked bool // whether we have seeked this file or not + overSeeked bool // set if we have seeked to the end or beyond } // Read bytes from the object - see io.Reader func (file *ObjectOpenFile) Read(p []byte) (n int, err error) { + if file.overSeeked { + return 0, io.EOF + } n, err = file.body.Read(p) file.bytes += int64(n) file.pos += int64(n) @@ -1330,6 +1565,7 @@ func (file *ObjectOpenFile) Read(p []byte) (n int, err error) { // // Seek(0, 1) will return the current file pointer. 
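As an aside, the `Content-Length` header added to `ObjectPutBytes` and `ObjectPutString` in the hunk above matters because both pass an `io.Reader` to `ObjectPut`; without an explicit length, `net/http` falls back to chunked transfer encoding. A sketch of the call shape, assuming the go-swift API shown in this diff (`putBytes` is a hypothetical wrapper):

```go
package main

import (
	"bytes"
	"strconv"

	"github.com/ncw/swift"
)

// putBytes mirrors what ObjectPutBytes now does internally: declare the
// body length up front so the PUT carries a Content-Length header.
func putBytes(c *swift.Connection, container, object string, contents []byte, contentType string) error {
	h := swift.Headers{"Content-Length": strconv.Itoa(len(contents))}
	_, err := c.ObjectPut(container, object, bytes.NewReader(contents), true, "", contentType, h)
	return err
}
```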
func (file *ObjectOpenFile) Seek(offset int64, whence int) (newPos int64, err error) { + file.overSeeked = false switch whence { case 0: // relative to start newPos = offset @@ -1340,6 +1576,10 @@ func (file *ObjectOpenFile) Seek(offset int64, whence int) (newPos int64, err er return file.pos, newError(0, "Length of file unknown so can't seek from end") } newPos = file.length + offset + if offset >= 0 { + file.overSeeked = true + return + } default: panic("Unknown whence in ObjectOpenFile.Seek") } @@ -1419,6 +1659,57 @@ func (file *ObjectOpenFile) Close() (err error) { var _ io.ReadCloser = &ObjectOpenFile{} var _ io.Seeker = &ObjectOpenFile{} +func (c *Connection) objectOpenBase(container string, objectName string, checkHash bool, h Headers, parameters url.Values) (file *ObjectOpenFile, headers Headers, err error) { + var resp *http.Response + opts := RequestOpts{ + Container: container, + ObjectName: objectName, + Operation: "GET", + ErrorMap: objectErrorMap, + Headers: h, + Parameters: parameters, + } + resp, headers, err = c.storage(opts) + if err != nil { + return + } + // Can't check MD5 on an object with X-Object-Manifest or X-Static-Large-Object set + if checkHash && headers.IsLargeObject() { + // log.Printf("swift: turning off md5 checking on object with manifest %v", objectName) + checkHash = false + } + file = &ObjectOpenFile{ + connection: c, + container: container, + objectName: objectName, + headers: h, + resp: resp, + checkHash: checkHash, + body: resp.Body, + } + if checkHash { + file.hash = md5.New() + file.body = io.TeeReader(resp.Body, file.hash) + } + // Read Content-Length + if resp.Header.Get("Content-Length") != "" { + file.length, err = getInt64FromHeader(resp, "Content-Length") + file.lengthOk = (err == nil) + } + return +} + +func (c *Connection) objectOpen(container string, objectName string, checkHash bool, h Headers, parameters url.Values) (file *ObjectOpenFile, headers Headers, err error) { + err = withLORetry(0, func() (Headers, int64, error) { + file, headers, err = c.objectOpenBase(container, objectName, checkHash, h, parameters) + if err != nil { + return headers, 0, err + } + return headers, file.length, nil + }) + return +} + // ObjectOpen returns an ObjectOpenFile for reading the contents of // the object. This satisfies the io.ReadCloser and the io.Seeker // interfaces. @@ -1443,41 +1734,7 @@ var _ io.Seeker = &ObjectOpenFile{} // // headers["Content-Type"] will give the content type if desired. 
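As an aside, the `overSeeked` flag added above lets `Read` return a clean `io.EOF` after a seek to (or past) the end of the object. A hedged usage sketch of the open/seek/read flow, assuming the API documented in this diff (container and object names are made up; `checkHash` is false because an MD5 computed over a partial body would never match):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/ncw/swift"
)

func readTail(c *swift.Connection, container, object string, n int64) ([]byte, error) {
	file, _, err := c.ObjectOpen(container, object, false, nil)
	if err != nil {
		return nil, err
	}
	defer file.Close()

	// whence 2: seek relative to the end of the object.
	if _, err := file.Seek(-n, 2); err != nil {
		return nil, err
	}
	return ioutil.ReadAll(file)
}

func main() {
	c := &swift.Connection{ /* credentials elided */ }
	tail, err := readTail(c, "container", "object", 100)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("last %d bytes: %q\n", len(tail), tail)
}
```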
func (c *Connection) ObjectOpen(container string, objectName string, checkHash bool, h Headers) (file *ObjectOpenFile, headers Headers, err error) { - var resp *http.Response - resp, headers, err = c.storage(RequestOpts{ - Container: container, - ObjectName: objectName, - Operation: "GET", - ErrorMap: objectErrorMap, - Headers: h, - }) - if err != nil { - return - } - // Can't check MD5 on an object with X-Object-Manifest or X-Static-Large-Object set - if checkHash && (headers["X-Object-Manifest"] != "" || headers["X-Static-Large-Object"] != "") { - // log.Printf("swift: turning off md5 checking on object with manifest %v", objectName) - checkHash = false - } - file = &ObjectOpenFile{ - connection: c, - container: container, - objectName: objectName, - headers: h, - resp: resp, - checkHash: checkHash, - body: resp.Body, - } - if checkHash { - file.hash = md5.New() - file.body = io.TeeReader(resp.Body, file.hash) - } - // Read Content-Length - if resp.Header.Get("Content-Length") != "" { - file.length, err = getInt64FromHeader(resp, "Content-Length") - file.lengthOk = (err == nil) - } - return + return c.objectOpen(container, objectName, checkHash, h, nil) } // ObjectGet gets the object into the io.Writer contents. @@ -1580,19 +1837,10 @@ type BulkDeleteResult struct { Headers Headers // Response HTTP headers. } -// BulkDelete deletes multiple objectNames from container in one operation. -// -// Some servers may not accept bulk-delete requests since bulk-delete is -// an optional feature of swift - these will return the Forbidden error. -// -// See also: -// * http://docs.openstack.org/trunk/openstack-object-storage/admin/content/object-storage-bulk-delete.html -// * http://docs.rackspace.com/files/api/v1/cf-devguide/content/Bulk_Delete-d1e2338.html -func (c *Connection) BulkDelete(container string, objectNames []string) (result BulkDeleteResult, err error) { +func (c *Connection) doBulkDelete(objects []string) (result BulkDeleteResult, err error) { var buffer bytes.Buffer - for _, s := range objectNames { - buffer.WriteString(fmt.Sprintf("/%s/%s\n", container, - url.QueryEscape(s))) + for _, s := range objects { + buffer.WriteString(url.QueryEscape(s) + "\n") } resp, headers, err := c.storage(RequestOpts{ Operation: "DELETE", @@ -1633,6 +1881,22 @@ func (c *Connection) BulkDelete(container string, objectNames []string) (result return } +// BulkDelete deletes multiple objectNames from container in one operation. +// +// Some servers may not accept bulk-delete requests since bulk-delete is +// an optional feature of swift - these will return the Forbidden error. +// +// See also: +// * http://docs.openstack.org/trunk/openstack-object-storage/admin/content/object-storage-bulk-delete.html +// * http://docs.rackspace.com/files/api/v1/cf-devguide/content/Bulk_Delete-d1e2338.html +func (c *Connection) BulkDelete(container string, objectNames []string) (result BulkDeleteResult, err error) { + fullPaths := make([]string, len(objectNames)) + for i, name := range objectNames { + fullPaths[i] = fmt.Sprintf("/%s/%s", container, name) + } + return c.doBulkDelete(fullPaths) +} + // BulkUploadResult stores results of BulkUpload(). // // Individual errors may (or may not) be returned by Errors. @@ -1716,6 +1980,17 @@ func (c *Connection) BulkUpload(uploadPath string, dataStream io.Reader, format // // Use headers.ObjectMetadata() to read the metadata in the Headers. 
func (c *Connection) Object(container string, objectName string) (info Object, headers Headers, err error) {
+	err = withLORetry(0, func() (Headers, int64, error) {
+		info, headers, err = c.objectBase(container, objectName)
+		if err != nil {
+			return headers, 0, err
+		}
+		return headers, info.Bytes, nil
+	})
+	return
+}
+
+func (c *Connection) objectBase(container string, objectName string) (info Object, headers Headers, err error) {
 	var resp *http.Response
 	resp, headers, err = c.storage(RequestOpts{
 		Container:  container,
@@ -1756,6 +2031,12 @@ func (c *Connection) Object(container string, objectName string) (info Object, h
 	}
 	info.Hash = resp.Header.Get("Etag")
+	if resp.Header.Get("X-Object-Manifest") != "" {
+		info.ObjectType = DynamicLargeObjectType
+	} else if resp.Header.Get("X-Static-Large-Object") != "" {
+		info.ObjectType = StaticLargeObjectType
+	}
+
 	return
 }
 
diff --git a/vendor/github.com/ncw/swift/swifttest/server.go b/vendor/github.com/ncw/swift/swifttest/server.go
index a49abb9a..6ec50f60 100644
--- a/vendor/github.com/ncw/swift/swifttest/server.go
+++ b/vendor/github.com/ncw/swift/swifttest/server.go
@@ -21,6 +21,7 @@ import (
 	"mime"
 	"net"
 	"net/http"
+	"net/http/httptest"
 	"net/url"
 	"path"
 	"regexp"
@@ -28,6 +29,7 @@ import (
 	"strconv"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"testing"
 	"time"
 
@@ -39,21 +41,28 @@ const (
 	TEST_ACCOUNT = "swifttest"
 )
 
+type HandlerOverrideFunc func(w http.ResponseWriter, r *http.Request, recorder *httptest.ResponseRecorder)
+
 type SwiftServer struct {
+	// `sync/atomic` expects the first word in an allocated struct to be 64-bit
+	// aligned on both ARM and x86-32.
+	// See https://golang.org/pkg/sync/atomic/#pkg-note-BUG for more details.
+	reqId int64
+
+	sync.RWMutex
 	t        *testing.T
-	reqId    int
 	mu       sync.Mutex
 	Listener net.Listener
 	AuthURL  string
 	URL      string
 	Accounts map[string]*account
 	Sessions map[string]*session
+	override map[string]HandlerOverrideFunc
 }
 
 // The Folder type represents a container stored in an account
 type Folder struct {
-	Count int `json:"count"`
-	Bytes int `json:"bytes"`
+	Count int64 `json:"count"`
+	Bytes int64 `json:"bytes"`
 	Name  string `json:"name"`
 }
 
@@ -96,13 +105,16 @@ type metadata struct {
 }
 
 type account struct {
+	sync.RWMutex
 	swift.Account
 	metadata
-	password   string
-	Containers map[string]*container
+	password       string
+	ContainersLock sync.RWMutex
+	Containers     map[string]*container
 }
 
 type object struct {
+	sync.RWMutex
 	metadata
 	name         string
 	mtime        time.Time
@@ -112,11 +124,31 @@ type object struct {
 }
 
 type container struct {
+	// `sync/atomic` expects the first word in an allocated struct to be 64-bit
+	// aligned on both ARM and x86-32.
+	// See https://golang.org/pkg/sync/atomic/#pkg-note-BUG for more details.
+	bytes int64
+
+	sync.RWMutex
 	metadata
 	name    string
 	ctime   time.Time
 	objects map[string]*object
-	bytes   int
+}
+
+type segment struct {
+	Path string `json:"path,omitempty"`
+	Hash string `json:"hash,omitempty"`
+	Size int64  `json:"size_bytes,omitempty"`
+	// When uploading a manifest, the attributes must be named `path`, `hash` and `size`
+	// but when querying the JSON content of a manifest with the `multipart-manifest=get`
+	// parameter, Swift names those attributes `name`, `etag` and `bytes`.
+	// We use all the different attribute names in this structure to be able to use
+	// the same structure for both uploading and retrieving.
+ Name string `json:"name,omitempty"` + Etag string `json:"etag,omitempty"` + Bytes int64 `json:"bytes,omitempty"` + ContentType string `json:"content_type,omitempty"` + LastModified string `json:"last_modified,omitempty"` } // A resource encapsulates the subject of an HTTP request. @@ -179,9 +211,12 @@ func (m metadata) getMetadata(a *action) { } } -func (c container) list(delimiter string, marker string, prefix string, parent string) (resp []interface{}) { +func (c *container) list(delimiter string, marker string, prefix string, parent string) (resp []interface{}) { var tmp orderedObjects + c.RLock() + defer c.RUnlock() + // first get all matching objects and arrange them in alphabetical order. for _, obj := range c.objects { if strings.HasPrefix(obj.name, prefix) { @@ -236,19 +271,23 @@ func (r containerResource) get(a *action) interface{} { fatalf(404, "NoSuchContainer", "The specified container does not exist") } + r.container.RLock() + delimiter := a.req.Form.Get("delimiter") marker := a.req.Form.Get("marker") prefix := a.req.Form.Get("prefix") format := a.req.URL.Query().Get("format") parent := a.req.Form.Get("path") - a.w.Header().Set("X-Container-Bytes-Used", strconv.Itoa(r.container.bytes)) + a.w.Header().Set("X-Container-Bytes-Used", strconv.Itoa(int(r.container.bytes))) a.w.Header().Set("X-Container-Object-Count", strconv.Itoa(len(r.container.objects))) r.container.getMetadata(a) if a.req.Method == "HEAD" { + r.container.RUnlock() return nil } + r.container.RUnlock() objects := r.container.list(delimiter, marker, prefix, parent) @@ -297,8 +336,10 @@ func (r containerResource) delete(a *action) interface{} { if len(b.objects) > 0 { fatalf(409, "Conflict", "The container you tried to delete is not empty") } + a.user.Lock() delete(a.user.Containers, b.name) a.user.Account.Containers-- + a.user.Unlock() return nil } @@ -319,8 +360,11 @@ func (r containerResource) put(a *action) interface{} { }, } r.container.setMetadata(a, "container") + + a.user.Lock() a.user.Containers[r.name] = r.container a.user.Account.Containers++ + a.user.Unlock() } return nil @@ -330,10 +374,13 @@ func (r containerResource) post(a *action) interface{} { if r.container == nil { fatalf(400, "Method", "The resource could not be found.") } else { + r.container.RLock() + defer r.container.RUnlock() + r.container.setMetadata(a, "container") a.w.WriteHeader(201) jsonMarshal(a.w, Folder{ - Count: len(r.container.objects), + Count: int64(len(r.container.objects)), Bytes: r.container.bytes, Name: r.container.name, }) @@ -388,10 +435,11 @@ func (obj *object) Key() Key { } var metaHeaders = map[string]bool{ - "Content-Type": true, - "Content-Encoding": true, - "Content-Disposition": true, - "X-Object-Manifest": true, + "Content-Type": true, + "Content-Encoding": true, + "Content-Disposition": true, + "X-Object-Manifest": true, + "X-Static-Large-Object": true, } var rangeRegexp = regexp.MustCompile("(bytes=)?([0-9]*)-([0-9]*)") @@ -409,6 +457,9 @@ func (objr objectResource) get(a *action) interface{} { fatalf(404, "Not Found", "The resource could not be found.") } + obj.RLock() + defer obj.RUnlock() + h := a.w.Header() // add metadata obj.getMetadata(a) @@ -433,7 +484,9 @@ func (objr objectResource) get(a *action) interface{} { if manifest, ok := obj.meta["X-Object-Manifest"]; ok { var segments []io.Reader components := strings.SplitN(manifest[0], "/", 2) + a.user.RLock() segContainer := a.user.Containers[components[0]] + a.user.RUnlock() prefix := components[1] resp := segContainer.list("", "", prefix, "") sum := 
md5.New() @@ -453,19 +506,54 @@ func (objr objectResource) get(a *action) interface{} { } etag = sum.Sum(nil) if end == -1 { - end = size + end = size - 1 } - reader = io.LimitReader(io.MultiReader(segments...), int64(end-start)) + reader = io.LimitReader(io.MultiReader(segments...), int64(end-start+1)) + } else if value, ok := obj.meta["X-Static-Large-Object"]; ok && value[0] == "True" && a.req.URL.Query().Get("multipart-manifest") != "get" { + var segments []io.Reader + var segmentList []segment + json.Unmarshal(obj.data, &segmentList) + cursor := 0 + size := 0 + sum := md5.New() + for _, segment := range segmentList { + components := strings.SplitN(segment.Name[1:], "/", 2) + a.user.RLock() + segContainer := a.user.Containers[components[0]] + a.user.RUnlock() + objectName := components[1] + segObject := segContainer.objects[objectName] + length := len(segObject.data) + size += length + sum.Write([]byte(hex.EncodeToString(segObject.checksum))) + if start >= cursor+length { + continue + } + segments = append(segments, bytes.NewReader(segObject.data[max(0, start-cursor):])) + cursor += length + } + etag = sum.Sum(nil) + if end == -1 { + end = size - 1 + } + reader = io.LimitReader(io.MultiReader(segments...), int64(end-start+1)) } else { if end == -1 { - end = len(obj.data) + end = len(obj.data) - 1 } etag = obj.checksum - reader = bytes.NewReader(obj.data[start:end]) + reader = bytes.NewReader(obj.data[start : end+1]) } - h.Set("Content-Length", fmt.Sprint(end-start)) - h.Set("ETag", hex.EncodeToString(etag)) + etagHex := hex.EncodeToString(etag) + + if a.req.Header.Get("If-None-Match") == etagHex { + a.w.WriteHeader(http.StatusNotModified) + return nil + } + + h.Set("Content-Length", fmt.Sprint(end-start+1)) + h.Set("ETag", etagHex) h.Set("Last-Modified", obj.mtime.Format(http.TimeFormat)) if a.req.Method == "HEAD" { @@ -514,10 +602,10 @@ func (objr objectResource) put(a *action) interface{} { meta: make(http.Header), }, } - a.user.Objects++ + atomic.AddInt64(&a.user.Objects, 1) } else { - objr.container.bytes -= len(obj.data) - a.user.BytesUsed -= int64(len(obj.data)) + atomic.AddInt64(&objr.container.bytes, -int64(len(obj.data))) + atomic.AddInt64(&a.user.BytesUsed, -int64(len(obj.data))) } var content_type string @@ -528,15 +616,39 @@ func (objr objectResource) put(a *action) interface{} { } } + if a.req.URL.Query().Get("multipart-manifest") == "put" { + // TODO: check the content of the SLO + a.req.Header.Set("X-Static-Large-Object", "True") + + var segments []segment + json.Unmarshal(data, &segments) + for i := range segments { + segments[i].Name = "/" + segments[i].Path + segments[i].Path = "" + segments[i].Hash = segments[i].Etag + segments[i].Etag = "" + segments[i].Bytes = segments[i].Size + segments[i].Size = 0 + } + + data, _ = json.Marshal(segments) + sum = md5.New() + sum.Write(data) + gotHash = sum.Sum(nil) + } + // PUT request has been successful - save data and metadata obj.setMetadata(a, "object") obj.content_type = content_type obj.data = data obj.checksum = gotHash obj.mtime = time.Now().UTC() + objr.container.Lock() objr.container.objects[objr.name] = obj - objr.container.bytes += len(data) - a.user.BytesUsed += int64(len(data)) + objr.container.bytes += int64(len(data)) + objr.container.Unlock() + + atomic.AddInt64(&a.user.BytesUsed, int64(len(data))) h := a.w.Header() h.Set("ETag", hex.EncodeToString(obj.checksum)) @@ -549,14 +661,25 @@ func (objr objectResource) delete(a *action) interface{} { fatalf(404, "NoSuchKey", "The specified key does not exist.") } - 
objr.container.bytes -= len(objr.object.data) - a.user.BytesUsed -= int64(len(objr.object.data)) + objr.container.Lock() + defer objr.container.Unlock() + + objr.object.Lock() + defer objr.object.Unlock() + + objr.container.bytes -= int64(len(objr.object.data)) delete(objr.container.objects, objr.name) - a.user.Objects-- + + atomic.AddInt64(&a.user.BytesUsed, -int64(len(objr.object.data))) + atomic.AddInt64(&a.user.Objects, -1) + return nil } func (objr objectResource) post(a *action) interface{} { + objr.object.Lock() + defer objr.object.Unlock() + obj := objr.object obj.setMetadata(a, "object") return nil @@ -568,6 +691,9 @@ func (objr objectResource) copy(a *action) interface{} { } obj := objr.object + obj.RLock() + defer obj.RUnlock() + destination := a.req.Header.Get("Destination") if destination == "" { fatalf(400, "Bad Request", "You must provide a Destination header") @@ -590,29 +716,38 @@ func (objr objectResource) copy(a *action) interface{} { meta: make(http.Header), }, } - a.user.Objects++ + atomic.AddInt64(&a.user.Objects, 1) } else { obj2 = objr2.object - objr2.container.bytes -= len(obj2.data) - a.user.BytesUsed -= int64(len(obj2.data)) + atomic.AddInt64(&objr2.container.bytes, -int64(len(obj2.data))) + atomic.AddInt64(&a.user.BytesUsed, -int64(len(obj2.data))) } default: fatalf(400, "Bad Request", "Destination must point to a valid object path") } + if objr2.container.name != objr2.container.name && obj2.name != obj.name { + obj2.Lock() + defer obj2.Unlock() + } + obj2.content_type = obj.content_type obj2.data = obj.data obj2.checksum = obj.checksum obj2.mtime = time.Now() - objr2.container.objects[objr2.name] = obj2 - objr2.container.bytes += len(obj.data) - a.user.BytesUsed += int64(len(obj.data)) for key, values := range obj.metadata.meta { obj2.metadata.meta[key] = values } obj2.setMetadata(a, "object") + objr2.container.Lock() + objr2.container.objects[objr2.name] = obj2 + objr2.container.bytes += int64(len(obj.data)) + objr2.container.Unlock() + + atomic.AddInt64(&a.user.BytesUsed, int64(len(obj.data))) + return nil } @@ -620,8 +755,14 @@ func (s *SwiftServer) serveHTTP(w http.ResponseWriter, req *http.Request) { // ignore error from ParseForm as it's usually spurious. 
req.ParseForm() - s.mu.Lock() - defer s.mu.Unlock() + if fn := s.override[req.URL.Path]; fn != nil { + originalRW := w + recorder := httptest.NewRecorder() + w = recorder + defer func() { + fn(originalRW, req, recorder) + }() + } if DEBUG { log.Printf("swifttest %q %q", req.Method, req.URL) @@ -630,9 +771,9 @@ func (s *SwiftServer) serveHTTP(w http.ResponseWriter, req *http.Request) { srv: s, w: w, req: req, - reqId: fmt.Sprintf("%09X", s.reqId), + reqId: fmt.Sprintf("%09X", atomic.LoadInt64(&s.reqId)), } - s.reqId++ + atomic.AddInt64(&s.reqId, 1) var r resource defer func() { @@ -651,6 +792,8 @@ func (s *SwiftServer) serveHTTP(w http.ResponseWriter, req *http.Request) { if req.URL.String() == "/v1.0" { username := req.Header.Get("x-auth-user") key := req.Header.Get("x-auth-key") + s.Lock() + defer s.Unlock() if acct, ok := s.Accounts[username]; ok { if acct.password == key { r := make([]byte, 16) @@ -676,6 +819,11 @@ func (s *SwiftServer) serveHTTP(w http.ResponseWriter, req *http.Request) { "tempurl": map[string]interface{}{ "methods": []string{"GET", "HEAD", "PUT"}, }, + "slo": map[string]interface{}{ + "max_manifest_segments": 1000, + "max_manifest_size": 2097152, + "min_segment_size": 1, + }, }) return } @@ -688,9 +836,11 @@ func (s *SwiftServer) serveHTTP(w http.ResponseWriter, req *http.Request) { if key == "" && signature != "" && expires != "" { accountName, _, _, _ := s.parseURL(req.URL) secretKey := "" + s.RLock() if account, ok := s.Accounts[accountName]; ok { secretKey = account.meta.Get("X-Account-Meta-Temp-Url-Key") } + s.RUnlock() get_hmac := func(method string) string { mac := hmac.New(sha1.New, []byte(secretKey)) @@ -707,12 +857,16 @@ func (s *SwiftServer) serveHTTP(w http.ResponseWriter, req *http.Request) { panic(notAuthorized()) } } else { + s.RLock() session, ok := s.Sessions[key[7:]] if !ok { + s.RUnlock() panic(notAuthorized()) + return } a.user = s.Accounts[session.username] + s.RUnlock() } switch req.Method { @@ -746,6 +900,14 @@ func (s *SwiftServer) serveHTTP(w http.ResponseWriter, req *http.Request) { } } +func (s *SwiftServer) SetOverride(path string, fn HandlerOverrideFunc) { + s.override[path] = fn +} + +func (s *SwiftServer) UnsetOverride(path string) { + delete(s.override, path) +} + func jsonMarshal(w io.Writer, x interface{}) { if err := json.NewEncoder(w).Encode(x); err != nil { panic(fmt.Errorf("error marshalling %#v: %v", x, err)) @@ -773,14 +935,21 @@ func (srv *SwiftServer) resourceForURL(u *url.URL) (r resource) { fatalf(404, "InvalidURI", err.Error()) } + srv.RLock() account, ok := srv.Accounts[accountName] if !ok { + //srv.RUnlock() fatalf(404, "NoSuchAccount", "The specified account does not exist") } + srv.RUnlock() + account.RLock() if containerName == "" { + account.RUnlock() return rootResource{} } + account.RUnlock() + b := containerResource{ name: containerName, container: account.Containers[containerName], @@ -800,6 +969,8 @@ func (srv *SwiftServer) resourceForURL(u *url.URL) (r resource) { container: b.container, } + objr.container.RLock() + defer objr.container.RUnlock() if obj := objr.container.objects[objr.name]; obj != nil { objr.object = obj } @@ -835,9 +1006,12 @@ func (rootResource) get(a *action) interface{} { h := a.w.Header() - h.Set("X-Account-Bytes-Used", strconv.Itoa(int(a.user.BytesUsed))) - h.Set("X-Account-Container-Count", strconv.Itoa(int(a.user.Account.Containers))) - h.Set("X-Account-Object-Count", strconv.Itoa(int(a.user.Objects))) + h.Set("X-Account-Bytes-Used", 
strconv.Itoa(int(atomic.LoadInt64(&a.user.BytesUsed)))) + h.Set("X-Account-Container-Count", strconv.Itoa(int(atomic.LoadInt64(&a.user.Account.Containers)))) + h.Set("X-Account-Object-Count", strconv.Itoa(int(atomic.LoadInt64(&a.user.Objects)))) + + a.user.RLock() + defer a.user.RUnlock() // add metadata a.user.metadata.getMetadata(a) @@ -862,7 +1036,7 @@ func (rootResource) get(a *action) interface{} { } if format == "json" { resp = append(resp, Folder{ - Count: len(container.objects), + Count: int64(len(container.objects)), Bytes: container.bytes, Name: container.name, }) @@ -879,7 +1053,9 @@ func (rootResource) get(a *action) interface{} { } func (r rootResource) post(a *action) interface{} { + a.user.Lock() a.user.metadata.setMetadata(a, "account") + a.user.Unlock() return nil } @@ -894,21 +1070,10 @@ func (rootResource) delete(a *action) interface{} { func (rootResource) copy(a *action) interface{} { return notAllowed() } func NewSwiftServer(address string) (*SwiftServer, error) { - var ( - l net.Listener - err error - ) if strings.Index(address, ":") == -1 { - for port := 1024; port < 65535; port++ { - addr := fmt.Sprintf("%s:%d", address, port) - if l, err = net.Listen("tcp", addr); err == nil { - address = addr - break - } - } - } else { - l, err = net.Listen("tcp", address) + address += ":0" } + l, err := net.Listen("tcp", address) if err != nil { return nil, fmt.Errorf("cannot listen on %s: %v", address, err) } @@ -919,6 +1084,7 @@ func NewSwiftServer(address string) (*SwiftServer, error) { URL: "http://" + l.Addr().String() + "/v1", Accounts: make(map[string]*account), Sessions: make(map[string]*session), + override: make(map[string]HandlerOverrideFunc), } server.Accounts[TEST_ACCOUNT] = &account{ diff --git a/vendor/github.com/ncw/swift/timeout_reader.go b/vendor/github.com/ncw/swift/timeout_reader.go index 3839e9ea..88ae7332 100644 --- a/vendor/github.com/ncw/swift/timeout_reader.go +++ b/vendor/github.com/ncw/swift/timeout_reader.go @@ -38,10 +38,12 @@ func (t *timeoutReader) Read(p []byte) (int, error) { done <- result{n, err} }() // Wait for the read or the timeout + timer := time.NewTimer(t.timeout) + defer timer.Stop() select { case r := <-done: return r.n, r.err - case <-time.After(t.timeout): + case <-timer.C: t.cancel() return 0, TimeoutError } diff --git a/vendor/github.com/ncw/swift/watchdog_reader.go b/vendor/github.com/ncw/swift/watchdog_reader.go index b12b1bbe..2714c9e1 100644 --- a/vendor/github.com/ncw/swift/watchdog_reader.go +++ b/vendor/github.com/ncw/swift/watchdog_reader.go @@ -5,29 +5,50 @@ import ( "time" ) +var watchdogChunkSize = 1 << 20 // 1 MiB + // An io.Reader which resets a watchdog timer whenever data is read type watchdogReader struct { - timeout time.Duration - reader io.Reader - timer *time.Timer + timeout time.Duration + reader io.Reader + timer *time.Timer + chunkSize int } // Returns a new reader which will kick the watchdog timer whenever data is read func newWatchdogReader(reader io.Reader, timeout time.Duration, timer *time.Timer) *watchdogReader { return &watchdogReader{ - timeout: timeout, - reader: reader, - timer: timer, + timeout: timeout, + reader: reader, + timer: timer, + chunkSize: watchdogChunkSize, } } // Read reads up to len(p) bytes into p -func (t *watchdogReader) Read(p []byte) (n int, err error) { - // FIXME limit the amount of data read in one chunk so as to not exceed the timeout? 
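As an aside, the `timeout_reader` change just above swaps `time.After` for an explicit `time.NewTimer` with a deferred `Stop`: `time.After` allocates a timer that cannot be stopped and is only reclaimed when it fires, which adds up when `Read` is called in a tight loop. A minimal sketch of the stoppable-timer select pattern (names are illustrative):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

var errTimeout = errors.New("operation timed out")

// await waits for a result or gives up after timeout. The deferred
// Stop releases the timer immediately instead of leaving it to fire.
func await(done <-chan int, timeout time.Duration) (int, error) {
	timer := time.NewTimer(timeout)
	defer timer.Stop()
	select {
	case n := <-done:
		return n, nil
	case <-timer.C:
		return 0, errTimeout
	}
}

func main() {
	done := make(chan int, 1)
	done <- 42
	fmt.Println(await(done, time.Second))
}
```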
+func (t *watchdogReader) Read(p []byte) (int, error) { + //read from underlying reader in chunks not larger than t.chunkSize + //while resetting the watchdog timer before every read; the small chunk + //size ensures that the timer does not fire when reading a large amount of + //data from a slow connection + start := 0 + end := len(p) + for start < end { + length := end - start + if length > t.chunkSize { + length = t.chunkSize + } + + resetTimer(t.timer, t.timeout) + n, err := t.reader.Read(p[start : start+length]) + start += n + if n == 0 || err != nil { + return start, err + } + } + resetTimer(t.timer, t.timeout) - n, err = t.reader.Read(p) - resetTimer(t.timer, t.timeout) - return + return start, nil } // Check it satisfies the interface From fd32d5f96203a037f42be549217877230f7aeb18 Mon Sep 17 00:00:00 2001 From: Corey Quon Date: Fri, 31 Aug 2018 14:15:50 -0700 Subject: [PATCH 150/276] update github.com/ncw/swift package in vendor to v1.0.40 Signed-off-by: Corey Quon --- vendor.conf | 2 +- vendor/github.com/ncw/swift/README.md | 39 ++++++++++++++------------- vendor/github.com/ncw/swift/slo.go | 11 +++++--- vendor/github.com/ncw/swift/swift.go | 22 ++++++++++++--- 4 files changed, 46 insertions(+), 28 deletions(-) diff --git a/vendor.conf b/vendor.conf index bcb2d1f0..e676bd8c 100644 --- a/vendor.conf +++ b/vendor.conf @@ -23,11 +23,11 @@ github.com/satori/go.uuid f58768cc1a7a7e77a3bd49e98cdd21419399b6a3 github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c github.com/miekg/dns 271c58e0c14f552178ea321a545ff9af38930f39 github.com/mitchellh/mapstructure 482a9fd5fa83e8c4e7817413b80f3eb8feec03ef +github.com/ncw/swift a0320860b16212c2b59b4912bb6508cda1d7cee6 github.com/prometheus/client_golang c332b6f63c0658a65eca15c0e5247ded801cf564 github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c github.com/prometheus/common 89604d197083d4781071d3c65855d24ecfb0a563 github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd -github.com/ncw/swift c95c6e5c2d1a3d37fc44c8c6dc9e231c7500667d github.com/spf13/cobra 312092086bed4968099259622145a0c9ae280064 github.com/spf13/pflag 5644820622454e71517561946e3d94b9f9db6842 github.com/stevvooe/resumable 2aaf90b2ceea5072cb503ef2a620b08ff3119870 diff --git a/vendor/github.com/ncw/swift/README.md b/vendor/github.com/ncw/swift/README.md index 5094a467..1ffd1ccd 100644 --- a/vendor/github.com/ncw/swift/README.md +++ b/vendor/github.com/ncw/swift/README.md @@ -26,26 +26,27 @@ See here for full package docs - http://godoc.org/github.com/ncw/swift Here is a short example from the docs +```go +import "github.com/ncw/swift" - import "github.com/ncw/swift" - - // Create a connection - c := swift.Connection{ - UserName: "user", - ApiKey: "key", - AuthUrl: "auth_url", - Domain: "domain", // Name of the domain (v3 auth only) - Tenant: "tenant", // Name of the tenant (v2 auth only) - } - // Authenticate - err := c.Authenticate() - if err != nil { - panic(err) - } - // List all the containers - containers, err := c.ContainerNames(nil) - fmt.Println(containers) - // etc... +// Create a connection +c := swift.Connection{ + UserName: "user", + ApiKey: "key", + AuthUrl: "auth_url", + Domain: "domain", // Name of the domain (v3 auth only) + Tenant: "tenant", // Name of the tenant (v2 auth only) +} +// Authenticate +err := c.Authenticate() +if err != nil { + panic(err) +} +// List all the containers +containers, err := c.ContainerNames(nil) +fmt.Println(containers) +// etc... 
+``` Additions --------- diff --git a/vendor/github.com/ncw/swift/slo.go b/vendor/github.com/ncw/swift/slo.go index 4d94aa76..6a10ddfc 100644 --- a/vendor/github.com/ncw/swift/slo.go +++ b/vendor/github.com/ncw/swift/slo.go @@ -88,7 +88,10 @@ func (c *Connection) StaticLargeObjectMove(srcContainer string, srcObjectName st return err } - if err := c.createSLOManifest(dstContainer, dstObjectName, info.ContentType, container, segments); err != nil { + //copy only metadata during move (other headers might not be safe for copying) + headers = headers.ObjectMetadata().ObjectHeaders() + + if err := c.createSLOManifest(dstContainer, dstObjectName, info.ContentType, container, segments, headers); err != nil { return err } @@ -100,7 +103,7 @@ func (c *Connection) StaticLargeObjectMove(srcContainer string, srcObjectName st } // createSLOManifest creates a static large object manifest -func (c *Connection) createSLOManifest(container string, path string, contentType string, segmentContainer string, segments []Object) error { +func (c *Connection) createSLOManifest(container string, path string, contentType string, segmentContainer string, segments []Object, h Headers) error { sloSegments := make([]swiftSegment, len(segments)) for i, segment := range segments { sloSegments[i].Path = fmt.Sprintf("%s/%s", segmentContainer, segment.Name) @@ -115,7 +118,7 @@ func (c *Connection) createSLOManifest(container string, path string, contentTyp values := url.Values{} values.Set("multipart-manifest", "put") - if _, err := c.objectPut(container, path, bytes.NewBuffer(content), false, "", contentType, nil, values); err != nil { + if _, err := c.objectPut(container, path, bytes.NewBuffer(content), false, "", contentType, h, values); err != nil { return err } @@ -127,7 +130,7 @@ func (file *StaticLargeObjectCreateFile) Close() error { } func (file *StaticLargeObjectCreateFile) Flush() error { - if err := file.conn.createSLOManifest(file.container, file.objectName, file.contentType, file.segmentContainer, file.segments); err != nil { + if err := file.conn.createSLOManifest(file.container, file.objectName, file.contentType, file.segmentContainer, file.segments, file.headers); err != nil { return err } return file.conn.waitForSegmentsToShowUp(file.container, file.objectName, file.Size()) diff --git a/vendor/github.com/ncw/swift/swift.go b/vendor/github.com/ncw/swift/swift.go index 38e69653..c8c28fa9 100644 --- a/vendor/github.com/ncw/swift/swift.go +++ b/vendor/github.com/ncw/swift/swift.go @@ -297,6 +297,7 @@ var ( TimeoutError = newError(408, "Timeout when reading or writing data") Forbidden = newError(403, "Operation forbidden") TooLargeObject = newError(413, "Too Large Object") + RateLimit = newError(498, "Rate Limit") // Mappings for authentication errors authErrorMap = errorMap{ @@ -311,6 +312,7 @@ var ( 403: Forbidden, 404: ContainerNotFound, 409: ContainerNotEmpty, + 498: RateLimit, } // Mappings for object errors @@ -321,6 +323,7 @@ var ( 404: ObjectNotFound, 413: TooLargeObject, 422: ObjectCorrupted, + 498: RateLimit, } ) @@ -1840,14 +1843,16 @@ type BulkDeleteResult struct { func (c *Connection) doBulkDelete(objects []string) (result BulkDeleteResult, err error) { var buffer bytes.Buffer for _, s := range objects { - buffer.WriteString(url.QueryEscape(s) + "\n") + u := url.URL{Path: s} + buffer.WriteString(u.String() + "\n") } resp, headers, err := c.storage(RequestOpts{ Operation: "DELETE", Parameters: url.Values{"bulk-delete": []string{"1"}}, Headers: Headers{ - "Accept": "application/json", - 
"Content-Type": "text/plain", + "Accept": "application/json", + "Content-Type": "text/plain", + "Content-Length": strconv.Itoa(buffer.Len()), }, ErrorMap: ContainerErrorMap, Body: &buffer, @@ -2074,6 +2079,15 @@ func (c *Connection) ObjectUpdate(container string, objectName string, h Headers return err } +// urlPathEscape escapes URL path the in string using URL escaping rules +// +// This mimics url.PathEscape which only available from go 1.8 +func urlPathEscape(in string) string { + var u url.URL + u.Path = in + return u.String() +} + // ObjectCopy does a server side copy of an object to a new position // // All metadata is preserved. If metadata is set in the headers then @@ -2086,7 +2100,7 @@ func (c *Connection) ObjectUpdate(container string, objectName string, h Headers func (c *Connection) ObjectCopy(srcContainer string, srcObjectName string, dstContainer string, dstObjectName string, h Headers) (headers Headers, err error) { // Meta stuff extraHeaders := map[string]string{ - "Destination": dstContainer + "/" + dstObjectName, + "Destination": urlPathEscape(dstContainer + "/" + dstObjectName), } for key, value := range h { extraHeaders[key] = value From 6b73a9ab8973394f096953a4e0b4cde778c42885 Mon Sep 17 00:00:00 2001 From: Ryan Abrams Date: Mon, 13 Aug 2018 13:51:27 -0700 Subject: [PATCH 151/276] Ignore missing paths during enumeration It's possible to run into a race condition in which the enumerator lists lots of repositories and then starts the long process of enumerating through them. In that time if someone deletes a repo, the enumerator may error out. Signed-off-by: Ryan Abrams --- registry/storage/driver/walk.go | 11 ++++++- registry/storage/driver/walk_test.go | 47 ++++++++++++++++++++++++++++ 2 files changed, 57 insertions(+), 1 deletion(-) create mode 100644 registry/storage/driver/walk_test.go diff --git a/registry/storage/driver/walk.go b/registry/storage/driver/walk.go index 2a5bc51f..8ded5b8b 100644 --- a/registry/storage/driver/walk.go +++ b/registry/storage/driver/walk.go @@ -4,6 +4,8 @@ import ( "context" "errors" "sort" + + "github.com/sirupsen/logrus" ) // ErrSkipDir is used as a return value from onFileFunc to indicate that @@ -32,7 +34,14 @@ func WalkFallback(ctx context.Context, driver StorageDriver, from string, f Walk // performance bottleneck. fileInfo, err := driver.Stat(ctx, child) if err != nil { - return err + switch err.(type) { + case PathNotFoundError: + // repository was removed in between listing and enumeration. Ignore it. 
+ logrus.WithField("path", child).Infof("ignoring deleted path") + continue + default: + return err + } } err = f(fileInfo) if err == nil && fileInfo.IsDir() { diff --git a/registry/storage/driver/walk_test.go b/registry/storage/driver/walk_test.go new file mode 100644 index 00000000..d6e9beb6 --- /dev/null +++ b/registry/storage/driver/walk_test.go @@ -0,0 +1,47 @@ +package driver + +import ( + "context" + "fmt" + "testing" +) + +type changingFileSystem struct { + StorageDriver + fileset []string + keptFiles map[string]bool +} + +func (cfs *changingFileSystem) List(ctx context.Context, path string) ([]string, error) { + return cfs.fileset, nil +} +func (cfs *changingFileSystem) Stat(ctx context.Context, path string) (FileInfo, error) { + kept, ok := cfs.keptFiles[path] + if ok && kept { + return &FileInfoInternal{ + FileInfoFields: FileInfoFields{ + Path: path, + }, + }, nil + } + return nil, PathNotFoundError{} +} +func TestWalkFileRemoved(t *testing.T) { + d := &changingFileSystem{ + fileset: []string{"zoidberg", "bender"}, + keptFiles: map[string]bool{ + "zoidberg": true, + }, + } + infos := []FileInfo{} + err := WalkFallback(context.Background(), d, "", func(fileInfo FileInfo) error { + infos = append(infos, fileInfo) + return nil + }) + if len(infos) != 1 || infos[0].Path() != "zoidberg" { + t.Errorf(fmt.Sprintf("unexpected path set during walk: %s", infos)) + } + if err != nil { + t.Fatalf(err.Error()) + } +} From c88728f217631db8e870ae5bb784415c1e426c7f Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Wed, 5 Sep 2018 13:40:42 -0700 Subject: [PATCH 152/276] Fix registry stripping newlines from manifests Content must be preserved exactly Signed-off-by: Derek McGowan --- registry/storage/manifestlisthandler.go | 7 +++---- registry/storage/ocimanifesthandler.go | 7 +++---- registry/storage/schema2manifesthandler.go | 7 +++---- 3 files changed, 9 insertions(+), 12 deletions(-) diff --git a/registry/storage/manifestlisthandler.go b/registry/storage/manifestlisthandler.go index 43468f66..ffc39161 100644 --- a/registry/storage/manifestlisthandler.go +++ b/registry/storage/manifestlisthandler.go @@ -2,7 +2,6 @@ package storage import ( "context" - "encoding/json" "fmt" "github.com/docker/distribution" @@ -23,12 +22,12 @@ var _ ManifestHandler = &manifestListHandler{} func (ms *manifestListHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { dcontext.GetLogger(ms.ctx).Debug("(*manifestListHandler).Unmarshal") - var m manifestlist.DeserializedManifestList - if err := json.Unmarshal(content, &m); err != nil { + m := &manifestlist.DeserializedManifestList{} + if err := m.UnmarshalJSON(content); err != nil { return nil, err } - return &m, nil + return m, nil } func (ms *manifestListHandler) Put(ctx context.Context, manifestList distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { diff --git a/registry/storage/ocimanifesthandler.go b/registry/storage/ocimanifesthandler.go index 03264b8c..91a037a0 100644 --- a/registry/storage/ocimanifesthandler.go +++ b/registry/storage/ocimanifesthandler.go @@ -2,7 +2,6 @@ package storage import ( "context" - "encoding/json" "fmt" "net/url" @@ -26,12 +25,12 @@ var _ ManifestHandler = &ocischemaManifestHandler{} func (ms *ocischemaManifestHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { dcontext.GetLogger(ms.ctx).Debug("(*ocischemaManifestHandler).Unmarshal") - var m ocischema.DeserializedManifest - if err := 
json.Unmarshal(content, &m); err != nil { + m := &ocischema.DeserializedManifest{} + if err := m.UnmarshalJSON(content); err != nil { return nil, err } - return &m, nil + return m, nil } func (ms *ocischemaManifestHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { diff --git a/registry/storage/schema2manifesthandler.go b/registry/storage/schema2manifesthandler.go index 863ab2ba..acb3255b 100644 --- a/registry/storage/schema2manifesthandler.go +++ b/registry/storage/schema2manifesthandler.go @@ -2,7 +2,6 @@ package storage import ( "context" - "encoding/json" "errors" "fmt" "net/url" @@ -33,12 +32,12 @@ var _ ManifestHandler = &schema2ManifestHandler{} func (ms *schema2ManifestHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { dcontext.GetLogger(ms.ctx).Debug("(*schema2ManifestHandler).Unmarshal") - var m schema2.DeserializedManifest - if err := json.Unmarshal(content, &m); err != nil { + m := &schema2.DeserializedManifest{} + if err := m.UnmarshalJSON(content); err != nil { return nil, err } - return &m, nil + return m, nil } func (ms *schema2ManifestHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { From 78238ef1a0762a1f9f742882e110741b2f83d656 Mon Sep 17 00:00:00 2001 From: Andrey Kostov Date: Mon, 8 Aug 2016 15:43:12 -0700 Subject: [PATCH 153/276] Add credentials argument for GCS driver Signed-off-by: Andrey Kostov --- registry/storage/driver/gcs/gcs.go | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/registry/storage/driver/gcs/gcs.go b/registry/storage/driver/gcs/gcs.go index d129bee7..d1276c25 100644 --- a/registry/storage/driver/gcs/gcs.go +++ b/registry/storage/driver/gcs/gcs.go @@ -17,6 +17,7 @@ package gcs import ( "bytes" "context" + "encoding/json" "fmt" "io" "io/ioutil" @@ -140,6 +141,27 @@ func FromParameters(parameters map[string]interface{}) (storagedriver.StorageDri return nil, err } ts = jwtConf.TokenSource(context.Background()) + } else if credentials, ok := parameters["credentials"]; ok { + credentialMap, ok := credentials.(map[interface{}]interface{}) + if !ok { + return nil, fmt.Errorf("The credentials were not specified in the correct format") + } + + stringMap := map[string]interface{}{} + for k, v := range credentialMap { + key, ok := k.(string) + if !ok { + return nil, fmt.Errorf("One of the credential keys was not a string") + } + stringMap[key] = v + } + + data, err := json.Marshal(stringMap) + jwtConf, err := google.JWTConfigFromJSON(data, storage.ScopeFullControl) + if err != nil { + return nil, err + } + ts = jwtConf.TokenSource(context.Background()) } else { var err error ts, err = google.DefaultTokenSource(context.Background(), storage.ScopeFullControl) From 3f9f073cefbd5e1fd6a4f0c669acea08a5bcf935 Mon Sep 17 00:00:00 2001 From: Andrey Kostov Date: Thu, 11 Aug 2016 14:41:23 -0700 Subject: [PATCH 154/276] Edit configuration.md to add gcs credentials option Signed-off-by: Andrey Kostov --- docs/configuration.md | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/docs/configuration.md b/docs/configuration.md index df344709..fe95d594 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -100,6 +100,17 @@ storage: gcs: bucket: bucketname keyfile: /path/to/keyfile + credentials: + type: service_account + project_id: project_id_string + private_key_id: private_key_id_string + private_key: 
private_key_string + client_email: client@example.com + client_id: client_id_string + auth_uri: http://example.com/auth_uri + token_uri: http://example.com/token_uri + auth_provider_x509_cert_url: http://example.com/provider_cert_url + client_x509_cert_url: http://example.com/client_cert_url rootdirectory: /gcs/object/name/prefix chunksize: 5242880 s3: @@ -389,6 +400,17 @@ storage: gcs: bucket: bucketname keyfile: /path/to/keyfile + credentials: + type: service_account + project_id: project_id_string + private_key_id: private_key_id_string + private_key: private_key_string + client_email: client@example.com + client_id: client_id_string + auth_uri: http://example.com/auth_uri + token_uri: http://example.com/token_uri + auth_provider_x509_cert_url: http://example.com/provider_cert_url + client_x509_cert_url: http://example.com/client_cert_url rootdirectory: /gcs/object/name/prefix s3: accesskey: awsaccesskey From b424c3d870e096c2652c5d62ee350b0e44bd6210 Mon Sep 17 00:00:00 2001 From: Andrey Kostov Date: Mon, 15 Aug 2016 14:35:18 -0700 Subject: [PATCH 155/276] Better error handling for GCS credential argument addition Signed-off-by: Andrey Kostov --- registry/storage/driver/gcs/gcs.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/registry/storage/driver/gcs/gcs.go b/registry/storage/driver/gcs/gcs.go index d1276c25..a1dbd195 100644 --- a/registry/storage/driver/gcs/gcs.go +++ b/registry/storage/driver/gcs/gcs.go @@ -151,12 +151,16 @@ func FromParameters(parameters map[string]interface{}) (storagedriver.StorageDri for k, v := range credentialMap { key, ok := k.(string) if !ok { - return nil, fmt.Errorf("One of the credential keys was not a string") + return nil, fmt.Errorf("One of the credential keys was not a string: %s", fmt.Sprint(k)) } stringMap[key] = v } data, err := json.Marshal(stringMap) + if err != nil { + return nil, fmt.Errorf("Failed to marshal gcs credentials to json") + } + jwtConf, err := google.JWTConfigFromJSON(data, storage.ScopeFullControl) if err != nil { return nil, err From f9187b257296ce68254be52f829c4da9a2e2ee3c Mon Sep 17 00:00:00 2001 From: Tony Holdstock-Brown Date: Tue, 6 Dec 2016 15:50:39 -0800 Subject: [PATCH 156/276] Add regulator to GCS Signed-off-by: Huu Nguyen --- registry/storage/driver/base/regulator.go | 43 +++++++++++++++++ .../storage/driver/base/regulator_test.go | 31 ++++++++++++ registry/storage/driver/filesystem/driver.go | 32 ++----------- registry/storage/driver/gcs/gcs.go | 48 ++++++++++++++----- 4 files changed, 114 insertions(+), 40 deletions(-) diff --git a/registry/storage/driver/base/regulator.go b/registry/storage/driver/base/regulator.go index 97a30ae4..9c5e6cc4 100644 --- a/registry/storage/driver/base/regulator.go +++ b/registry/storage/driver/base/regulator.go @@ -2,7 +2,10 @@ package base import ( "context" + "fmt" "io" + "reflect" + "strconv" "sync" storagedriver "github.com/docker/distribution/registry/storage/driver" @@ -15,6 +18,46 @@ type regulator struct { available uint64 } +// GetLimitFromParameter takes an interface type as decoded from the YAML +// configuration and returns a uint64 representing the maximum number of +// concurrent calls given a minimum limit and default. +// +// If the parameter supplied is of an invalid type this returns an error. 
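As an aside, the intended call shape for this helper is visible in the driver changes further down: each storage driver reads its concurrency knob from the YAML-decoded parameter map and lets the helper handle type coercion, clamping, and defaults. A hedged sketch (the minimum of 25 and default of 50 mirror the GCS values in this patch; `concurrencyFromConfig` is a hypothetical wrapper):

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/registry/storage/driver/base"
)

func concurrencyFromConfig(parameters map[string]interface{}) (uint64, error) {
	// nil  -> default (50)
	// "10" -> clamped up to the minimum (25)
	// 200  -> used as-is
	// "x"  -> error
	limit, err := base.GetLimitFromParameter(parameters["maxconcurrency"], 25, 50)
	if err != nil {
		return 0, fmt.Errorf("maxconcurrency config error: %s", err)
	}
	return limit, nil
}

func main() {
	limit, err := concurrencyFromConfig(map[string]interface{}{"maxconcurrency": "10"})
	fmt.Println(limit, err) // 25 <nil>
}
```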
+func GetLimitFromParameter(param interface{}, min, def uint64) (uint64, error) { + limit := def + + switch v := param.(type) { + case string: + var err error + if limit, err = strconv.ParseUint(v, 0, 64); err != nil { + return limit, fmt.Errorf("parameter must be an integer, '%v' invalid", param) + } + case uint64: + limit = v + case int, int32, int64: + val := reflect.ValueOf(v).Convert(reflect.TypeOf(param)).Int() + // if param is negative casting to uint64 will wrap around and + // give you the hugest thread limit ever. Let's be sensible, here + if val > 0 { + limit = uint64(val) + } else { + limit = min + } + case uint, uint32: + limit = reflect.ValueOf(v).Convert(reflect.TypeOf(param)).Uint() + case nil: + // use the default + default: + return 0, fmt.Errorf("invalid value '%#v'", param) + } + + if limit < min { + return min, nil + } + + return limit, nil +} + // NewRegulator wraps the given driver and is used to regulate concurrent calls // to the given storage driver to a maximum of the given limit. This is useful // for storage drivers that would otherwise create an unbounded number of OS diff --git a/registry/storage/driver/base/regulator_test.go b/registry/storage/driver/base/regulator_test.go index e4c0ad58..e30c6a75 100644 --- a/registry/storage/driver/base/regulator_test.go +++ b/registry/storage/driver/base/regulator_test.go @@ -1,6 +1,7 @@ package base import ( + "fmt" "sync" "testing" "time" @@ -65,3 +66,33 @@ func TestRegulatorEnterExit(t *testing.T) { } } } + +func TestGetLimitFromParameter(t *testing.T) { + tests := []struct { + Input interface{} + Expected uint64 + Min uint64 + Default uint64 + Err error + }{ + {"foo", 0, 5, 5, fmt.Errorf("parameter must be an integer, 'foo' invalid")}, + {"50", 50, 5, 5, nil}, + {"5", 25, 25, 50, nil}, // lower than Min returns Min + {nil, 50, 25, 50, nil}, // nil returns default + {812, 812, 25, 50, nil}, + } + + for _, item := range tests { + t.Run(fmt.Sprint(item.Input), func(t *testing.T) { + actual, err := GetLimitFromParameter(item.Input, item.Min, item.Default) + + if err != nil && item.Err != nil && err.Error() != item.Err.Error() { + t.Fatalf("GetLimitFromParameter error, expected %#v got %#v", item.Err, err) + } + + if actual != item.Expected { + t.Fatalf("GetLimitFromParameter result error, expected %d got %d", item.Expected, actual) + } + }) + } +} diff --git a/registry/storage/driver/filesystem/driver.go b/registry/storage/driver/filesystem/driver.go index 9036536b..8fc9d1ca 100644 --- a/registry/storage/driver/filesystem/driver.go +++ b/registry/storage/driver/filesystem/driver.go @@ -9,8 +9,6 @@ import ( "io/ioutil" "os" "path" - "reflect" - "strconv" "time" storagedriver "github.com/docker/distribution/registry/storage/driver" @@ -85,33 +83,9 @@ func fromParametersImpl(parameters map[string]interface{}) (*DriverParameters, e rootDirectory = fmt.Sprint(rootDir) } - // Get maximum number of threads for blocking filesystem operations, - // if specified - threads := parameters["maxthreads"] - switch v := threads.(type) { - case string: - if maxThreads, err = strconv.ParseUint(v, 0, 64); err != nil { - return nil, fmt.Errorf("maxthreads parameter must be an integer, %v invalid", threads) - } - case uint64: - maxThreads = v - case int, int32, int64: - val := reflect.ValueOf(v).Convert(reflect.TypeOf(threads)).Int() - // If threads is negative casting to uint64 will wrap around and - // give you the hugest thread limit ever. 
Let's be sensible, here
-		if val > 0 {
-			maxThreads = uint64(val)
-		}
-	case uint, uint32:
-		maxThreads = reflect.ValueOf(v).Convert(reflect.TypeOf(threads)).Uint()
-	case nil:
-		// do nothing
-	default:
-		return nil, fmt.Errorf("invalid value for maxthreads: %#v", threads)
-	}
-
-	if maxThreads < minThreads {
-		maxThreads = minThreads
+	maxThreads, err = base.GetLimitFromParameter(parameters["maxthreads"], minThreads, defaultMaxThreads)
+	if err != nil {
+		return nil, fmt.Errorf("maxthreads config error: %s", err.Error())
 	}
 }
 
diff --git a/registry/storage/driver/gcs/gcs.go b/registry/storage/driver/gcs/gcs.go
index a1dbd195..b6784487 100644
--- a/registry/storage/driver/gcs/gcs.go
+++ b/registry/storage/driver/gcs/gcs.go
@@ -9,8 +9,6 @@
 //
 // Note that the contents of incomplete uploads are not accessible even though
 // Stat returns their length
-//
-// +build include_gcs
 
 package gcs
 
@@ -50,6 +48,8 @@ const (
 	uploadSessionContentType = "application/x-docker-upload-session"
 	minChunkSize             = 256 * 1024
 	defaultChunkSize         = 20 * minChunkSize
+	defaultMaxConcurrency    = 50
+	minConcurrency           = 25
 
 	maxTries = 5
 )
@@ -65,6 +65,12 @@ type driverParameters struct {
 	client        *http.Client
 	rootDirectory string
 	chunkSize     int
+
+	// maxConcurrency limits the number of concurrent driver operations
+	// to GCS, which ultimately increases reliability of many simultaneous
+	// pushes by ensuring we aren't DoSing our own server with many
+	// connections.
+	maxConcurrency uint64
 }
 
 func init() {
@@ -90,6 +96,16 @@ type driver struct {
 	chunkSize int
 }
 
+// Wrapper wraps `driver` with a throttler, ensuring that no more than N
+// GCS actions can occur concurrently. The default limit is 50.
+type Wrapper struct {
+	baseEmbed
+}
+
+type baseEmbed struct {
+	base.Base
+}
+
 // FromParameters constructs a new Driver with a given parameters map
 // Required parameters:
 // - bucket
@@ -174,13 +190,19 @@ func FromParameters(parameters map[string]interface{}) (storagedriver.StorageDri
 		}
 	}
 
+	maxConcurrency, err := base.GetLimitFromParameter(parameters["maxconcurrency"], minConcurrency, defaultMaxConcurrency)
+	if err != nil {
+		return nil, fmt.Errorf("maxconcurrency config error: %s", err)
+	}
+
 	params := driverParameters{
-		bucket:        fmt.Sprint(bucket),
-		rootDirectory: fmt.Sprint(rootDirectory),
-		email:         jwtConf.Email,
-		privateKey:    jwtConf.PrivateKey,
-		client:        oauth2.NewClient(context.Background(), ts),
-		chunkSize:     chunkSize,
+		bucket:         fmt.Sprint(bucket),
+		rootDirectory:  fmt.Sprint(rootDirectory),
+		email:          jwtConf.Email,
+		privateKey:     jwtConf.PrivateKey,
+		client:         oauth2.NewClient(context.Background(), ts),
+		chunkSize:      chunkSize,
+		maxConcurrency: maxConcurrency,
 	}
 
 	return New(params)
@@ -204,8 +226,12 @@ func New(params driverParameters) (storagedriver.StorageDriver, error) {
 		chunkSize: params.chunkSize,
 	}
 
-	return &base.Base{
-		StorageDriver: d,
+	return &Wrapper{
+		baseEmbed: baseEmbed{
+			Base: base.Base{
+				StorageDriver: base.NewRegulator(d, params.maxConcurrency),
+			},
+		},
 	}, nil
 }
 
@@ -890,7 +916,7 @@ func (d *driver) context(context context.Context) context.Context {
 }
 
 func (d *driver) pathToKey(path string) string {
-	return strings.TrimRight(d.rootDirectory+strings.TrimLeft(path, "/"), "/")
+	return strings.TrimSpace(strings.TrimRight(d.rootDirectory+strings.TrimLeft(path, "/"), "/"))
 }
 
 func (d *driver) pathToDirKey(path string) string {
From 69299d93d9b324ad2a7d9c9081f280f1e74ce74f Mon Sep 17 00:00:00 2001
From: Huu Nguyen
Date: Mon, 7 May 2018 15:04:06 -0700
Subject: [PATCH 157/276] Use existing jwtConf instead of
creating a scoped one Signed-off-by: Huu Nguyen --- registry/storage/driver/gcs/gcs.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/registry/storage/driver/gcs/gcs.go b/registry/storage/driver/gcs/gcs.go index b6784487..07f58afa 100644 --- a/registry/storage/driver/gcs/gcs.go +++ b/registry/storage/driver/gcs/gcs.go @@ -177,7 +177,7 @@ func FromParameters(parameters map[string]interface{}) (storagedriver.StorageDri return nil, fmt.Errorf("Failed to marshal gcs credentials to json") } - jwtConf, err := google.JWTConfigFromJSON(data, storage.ScopeFullControl) + jwtConf, err = google.JWTConfigFromJSON(data, storage.ScopeFullControl) if err != nil { return nil, err } From 7a195dd5ca4b9b399da0b1b36441dfa12658e660 Mon Sep 17 00:00:00 2001 From: Huu Nguyen Date: Tue, 15 May 2018 11:20:09 -0700 Subject: [PATCH 158/276] Add back include_gcs build constraint Signed-off-by: Huu Nguyen --- registry/storage/driver/gcs/gcs.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/registry/storage/driver/gcs/gcs.go b/registry/storage/driver/gcs/gcs.go index 07f58afa..86dc87f1 100644 --- a/registry/storage/driver/gcs/gcs.go +++ b/registry/storage/driver/gcs/gcs.go @@ -9,6 +9,8 @@ // // Note that the contents of incomplete uploads are not accessible even though // Stat returns their length +// +// +build include_gcs package gcs From d1f36d46c9b2ea29a14dc7ecd2751f17a2c473bc Mon Sep 17 00:00:00 2001 From: ruicao Date: Fri, 7 Sep 2018 18:13:53 +0800 Subject: [PATCH 159/276] Fix some typos Signed-off-by: ruicao --- docs/spec/auth/oauth.md | 2 +- manifest/ocischema/manifest.go | 2 +- manifest/schema1/manifest.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/spec/auth/oauth.md b/docs/spec/auth/oauth.md index f4fdec81..f646f454 100644 --- a/docs/spec/auth/oauth.md +++ b/docs/spec/auth/oauth.md @@ -39,7 +39,7 @@ Content-Type: application/x-www-form-urlencoded
(REQUIRED) Type of grant used to get token. When getting a refresh token
    using credentials this type should be set to "password" and have the
-   accompanying username and password paramters. Type "authorization_code"
+   accompanying username and password parameters. Type "authorization_code"
    is reserved for future use for authenticating to an authorization server
    without having to send credentials directly from the client. When
    requesting an access token with a refresh token this should be set to
diff --git a/manifest/ocischema/manifest.go b/manifest/ocischema/manifest.go
index 6de17f9a..09ce78b5 100644
--- a/manifest/ocischema/manifest.go
+++ b/manifest/ocischema/manifest.go
@@ -52,7 +52,7 @@ type Manifest struct {
 	Annotations map[string]string `json:"annotations,omitempty"`
 }
 
-// References returnes the descriptors of this manifests references.
+// References returns the descriptors of this manifest's references.
 func (m Manifest) References() []distribution.Descriptor {
 	references := make([]distribution.Descriptor, 0, 1+len(m.Layers))
 	references = append(references, m.Config)
diff --git a/manifest/schema1/manifest.go b/manifest/schema1/manifest.go
index 65042a75..5a06b54b 100644
--- a/manifest/schema1/manifest.go
+++ b/manifest/schema1/manifest.go
@@ -138,7 +138,7 @@ func (sm *SignedManifest) UnmarshalJSON(b []byte) error {
 	return nil
 }
 
-// References returnes the descriptors of this manifests references
+// References returns the descriptors of this manifest's references
 func (sm SignedManifest) References() []distribution.Descriptor {
 	dependencies := make([]distribution.Descriptor, len(sm.FSLayers))
 	for i, fsLayer := range sm.FSLayers {
From 877d706b38075643527f8a57839a05926a2fbb42 Mon Sep 17 00:00:00 2001
From: David Wu
Date: Tue, 11 Sep 2018 00:12:40 -0700
Subject: [PATCH 160/276] remove dependencies on resumable

Signed-off-by: David Wu

---
 registry/storage/blobwriter_resumable.go      |  45 +-
 registry/storage/digester_resumable_test.go   |  22 -
 vendor.conf                                   |   1 -
 vendor/github.com/stevvooe/resumable/LICENSE  |  28 -
 .../github.com/stevvooe/resumable/README.md   |   6 -
 .../stevvooe/resumable/resumable.go           |  51 -
 .../stevvooe/resumable/sha256/resume.go       |  71 --
 .../stevvooe/resumable/sha256/sha256.go       | 193 ---
 .../stevvooe/resumable/sha256/sha256block.go  | 126 --
 .../resumable/sha256/sha256block_386.s        | 283 -----
 .../resumable/sha256/sha256block_amd64.s      | 1044 -----------------
 .../resumable/sha256/sha256block_decl.go      |  11 -
 .../resumable/sha256/sha256block_generic.go   |   9 -
 .../resumable/sha256/sha256block_s390x.go     |  12 -
 .../resumable/sha256/sha256block_s390x.s      |  34 -
 .../stevvooe/resumable/sha512/resume.go       |  63 -
 .../stevvooe/resumable/sha512/sha512.go       | 288 -----
 .../stevvooe/resumable/sha512/sha512block.go  | 142 ---
 .../resumable/sha512/sha512block_amd64.s      | 273 -----
 .../resumable/sha512/sha512block_decl.go      |  11 -
 .../resumable/sha512/sha512block_generic.go   |   9 -
 .../resumable/sha512/sha512block_s390x.go     |  12 -
 .../resumable/sha512/sha512block_s390x.s      |  34 -
 23 files changed, 27 insertions(+), 2741 deletions(-)
 delete mode 100644 registry/storage/digester_resumable_test.go
 delete mode 100644 vendor/github.com/stevvooe/resumable/LICENSE
 delete mode 100644 vendor/github.com/stevvooe/resumable/README.md
 delete mode 100644 vendor/github.com/stevvooe/resumable/resumable.go
 delete mode 100644 vendor/github.com/stevvooe/resumable/sha256/resume.go
 delete mode 100644 vendor/github.com/stevvooe/resumable/sha256/sha256.go
 delete mode 100644 vendor/github.com/stevvooe/resumable/sha256/sha256block.go
 delete
mode 100644 vendor/github.com/stevvooe/resumable/sha256/sha256block_386.s delete mode 100644 vendor/github.com/stevvooe/resumable/sha256/sha256block_amd64.s delete mode 100644 vendor/github.com/stevvooe/resumable/sha256/sha256block_decl.go delete mode 100644 vendor/github.com/stevvooe/resumable/sha256/sha256block_generic.go delete mode 100644 vendor/github.com/stevvooe/resumable/sha256/sha256block_s390x.go delete mode 100644 vendor/github.com/stevvooe/resumable/sha256/sha256block_s390x.s delete mode 100644 vendor/github.com/stevvooe/resumable/sha512/resume.go delete mode 100644 vendor/github.com/stevvooe/resumable/sha512/sha512.go delete mode 100644 vendor/github.com/stevvooe/resumable/sha512/sha512block.go delete mode 100644 vendor/github.com/stevvooe/resumable/sha512/sha512block_amd64.s delete mode 100644 vendor/github.com/stevvooe/resumable/sha512/sha512block_decl.go delete mode 100644 vendor/github.com/stevvooe/resumable/sha512/sha512block_generic.go delete mode 100644 vendor/github.com/stevvooe/resumable/sha512/sha512block_s390x.go delete mode 100644 vendor/github.com/stevvooe/resumable/sha512/sha512block_s390x.s diff --git a/registry/storage/blobwriter_resumable.go b/registry/storage/blobwriter_resumable.go index b700982a..1f3b93d4 100644 --- a/registry/storage/blobwriter_resumable.go +++ b/registry/storage/blobwriter_resumable.go @@ -4,17 +4,14 @@ package storage import ( "context" + "encoding" "fmt" + "hash" "path" "strconv" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/sirupsen/logrus" - "github.com/stevvooe/resumable" - - // register resumable hashes with import - _ "github.com/stevvooe/resumable/sha256" - _ "github.com/stevvooe/resumable/sha512" ) // resumeDigest attempts to restore the state of the internal hash function @@ -24,12 +21,18 @@ func (bw *blobWriter) resumeDigest(ctx context.Context) error { return errResumableDigestNotAvailable } - h, ok := bw.digester.Hash().(resumable.Hash) + h, ok := bw.digester.Hash().(encoding.BinaryMarshaler) if !ok { return errResumableDigestNotAvailable } + + state, err := h.MarshalBinary() + if err != nil { + return err + } + offset := bw.fileWriter.Size() - if offset == h.Len() { + if offset == int64(len(state)) { // State of digester is already at the requested offset. return nil } @@ -52,20 +55,26 @@ func (bw *blobWriter) resumeDigest(ctx context.Context) error { if hashStateMatch.offset == 0 { // No need to load any state, just reset the hasher. - h.Reset() + h.(hash.Hash).Reset() } else { storedState, err := bw.driver.GetContent(ctx, hashStateMatch.path) if err != nil { return err } - if err = h.Restore(storedState); err != nil { + // This type assertion is safe since we already did an assertion at the beginning + if err = h.(encoding.BinaryUnmarshaler).UnmarshalBinary(storedState); err != nil { + return err + } + + state, err = h.(encoding.BinaryMarshaler).MarshalBinary() + if err != nil { return err } } // Mind the gap. 
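+	// Note: the gap check below parallels the offset comparison at the top of
+	// this function, with the marshaled state length standing in for the
+	// removed resumable.Hash.Len(); any shortfall abandons resumption rather
+	// than replaying the missing bytes.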
- if gapLen := offset - h.Len(); gapLen > 0 { + if gapLen := offset - int64(len(state)); gapLen > 0 { return errResumableDigestNotAvailable } @@ -120,26 +129,26 @@ func (bw *blobWriter) storeHashState(ctx context.Context) error { return errResumableDigestNotAvailable } - h, ok := bw.digester.Hash().(resumable.Hash) + h, ok := bw.digester.Hash().(encoding.BinaryMarshaler) if !ok { return errResumableDigestNotAvailable } + state, err := h.MarshalBinary() + if err != nil { + return fmt.Errorf("could not marshal: %v", err) + } + uploadHashStatePath, err := pathFor(uploadHashStatePathSpec{ name: bw.blobStore.repository.Named().String(), id: bw.id, alg: bw.digester.Digest().Algorithm(), - offset: h.Len(), + offset: int64(len(state)), }) if err != nil { return err } - hashState, err := h.State() - if err != nil { - return err - } - - return bw.driver.PutContent(ctx, uploadHashStatePath, hashState) + return bw.driver.PutContent(ctx, uploadHashStatePath, state) } diff --git a/registry/storage/digester_resumable_test.go b/registry/storage/digester_resumable_test.go deleted file mode 100644 index 54ece3c4..00000000 --- a/registry/storage/digester_resumable_test.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build !noresumabledigest - -package storage - -import ( - "testing" - - digest "github.com/opencontainers/go-digest" - "github.com/stevvooe/resumable" - _ "github.com/stevvooe/resumable/sha256" -) - -// TestResumableDetection just ensures that the resumable capability of a hash -// is exposed through the digester type, which is just a hash plus a Digest -// method. -func TestResumableDetection(t *testing.T) { - d := digest.Canonical.Digester() - - if _, ok := d.Hash().(resumable.Hash); !ok { - t.Fatalf("expected digester to implement resumable.Hash: %#v, %v", d, d.Hash()) - } -} diff --git a/vendor.conf b/vendor.conf index e676bd8c..7aec1d05 100644 --- a/vendor.conf +++ b/vendor.conf @@ -30,7 +30,6 @@ github.com/prometheus/common 89604d197083d4781071d3c65855d24ecfb0a563 github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd github.com/spf13/cobra 312092086bed4968099259622145a0c9ae280064 github.com/spf13/pflag 5644820622454e71517561946e3d94b9f9db6842 -github.com/stevvooe/resumable 2aaf90b2ceea5072cb503ef2a620b08ff3119870 github.com/xenolf/lego a9d8cec0e6563575e5868a005359ac97911b5985 github.com/yvasiyarov/go-metrics 57bccd1ccd43f94bb17fdd8bf3007059b802f85e github.com/yvasiyarov/gorelic a9bba5b9ab508a086f9a12b8c51fab68478e2128 diff --git a/vendor/github.com/stevvooe/resumable/LICENSE b/vendor/github.com/stevvooe/resumable/LICENSE deleted file mode 100644 index 2815cc36..00000000 --- a/vendor/github.com/stevvooe/resumable/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/stevvooe/resumable/README.md b/vendor/github.com/stevvooe/resumable/README.md deleted file mode 100644 index d2d3fb89..00000000 --- a/vendor/github.com/stevvooe/resumable/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# go-crypto -A Subset of the Go `crypto` Package with a Resumable Hash Interface - -### Documentation - -GoDocs: http://godoc.org/github.com/stevvooe/resumable diff --git a/vendor/github.com/stevvooe/resumable/resumable.go b/vendor/github.com/stevvooe/resumable/resumable.go deleted file mode 100644 index 62ec970a..00000000 --- a/vendor/github.com/stevvooe/resumable/resumable.go +++ /dev/null @@ -1,51 +0,0 @@ -// Package resumable registers resumable versions of hash functions. Resumable -// varieties of hash functions are available via the standard crypto package. -// Support can be checked by type assertion against the resumable.Hash -// interface. -// -// While one can use these sub-packages directly, it makes more sense to -// register them using side-effect imports: -// -// import _ "github.com/stevvooe/resumable/sha256" -// -// This will make the resumable hashes available to the application through -// the standard crypto package. For example, if a new sha256 is required, one -// should use the following: -// -// h := crypto.SHA256.New() -// -// Such a features allows one to control the inclusion of resumable hash -// support in a single file. Applications that require the resumable hash -// implementation can type switch to detect support, while other parts of the -// application can be completely oblivious to the presence of the alternative -// hash functions. -// -// Also note that the implementations available in this package are completely -// untouched from their Go counterparts in the standard library. Only an extra -// file is added to each package to implement the extra resumable hash -// functions. -package resumable - -import ( - "fmt" - "hash" -) - -var ( - // ErrBadState is returned if Restore fails post-unmarshaling validation. - ErrBadState = fmt.Errorf("bad hash state") -) - -// Hash is the common interface implemented by all resumable hash functions. -type Hash interface { - hash.Hash - - // Len returns the number of bytes written to the Hash so far. - Len() int64 - - // State returns a snapshot of the state of the Hash. - State() ([]byte, error) - - // Restore resets the Hash to the given state. 
- Restore(state []byte) error -} diff --git a/vendor/github.com/stevvooe/resumable/sha256/resume.go b/vendor/github.com/stevvooe/resumable/sha256/resume.go deleted file mode 100644 index 4e1cf4e6..00000000 --- a/vendor/github.com/stevvooe/resumable/sha256/resume.go +++ /dev/null @@ -1,71 +0,0 @@ -package sha256 - -import ( - "bytes" - "crypto" - "encoding/gob" - - "github.com/stevvooe/resumable" - // import to ensure that our init function runs after the standard package - _ "crypto/sha256" -) - -// Len returns the number of bytes which have been written to the digest. -func (d *digest) Len() int64 { - return int64(d.len) -} - -// State returns a snapshot of the state of the digest. -func (d *digest) State() ([]byte, error) { - var buf bytes.Buffer - encoder := gob.NewEncoder(&buf) - - function := crypto.SHA256 - if d.is224 { - function = crypto.SHA224 - } - - // We encode this way so that we do not have - // to export these fields of the digest struct. - vals := []interface{}{ - d.h, d.x, d.nx, d.len, function, - } - - for _, val := range vals { - if err := encoder.Encode(val); err != nil { - return nil, err - } - } - - return buf.Bytes(), nil -} - -// Restore resets the digest to the given state. -func (d *digest) Restore(state []byte) error { - decoder := gob.NewDecoder(bytes.NewReader(state)) - - var function uint - - // We decode this way so that we do not have - // to export these fields of the digest struct. - vals := []interface{}{ - &d.h, &d.x, &d.nx, &d.len, &function, - } - - for _, val := range vals { - if err := decoder.Decode(val); err != nil { - return err - } - } - - switch crypto.Hash(function) { - case crypto.SHA224: - d.is224 = true - case crypto.SHA256: - d.is224 = false - default: - return resumable.ErrBadState - } - - return nil -} diff --git a/vendor/github.com/stevvooe/resumable/sha256/sha256.go b/vendor/github.com/stevvooe/resumable/sha256/sha256.go deleted file mode 100644 index 74b05b92..00000000 --- a/vendor/github.com/stevvooe/resumable/sha256/sha256.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package sha256 implements the SHA224 and SHA256 hash algorithms as defined -// in FIPS 180-4. -package sha256 - -import ( - "crypto" - "hash" -) - -func init() { - crypto.RegisterHash(crypto.SHA224, New224) - crypto.RegisterHash(crypto.SHA256, New) -} - -// The size of a SHA256 checksum in bytes. -const Size = 32 - -// The size of a SHA224 checksum in bytes. -const Size224 = 28 - -// The blocksize of SHA256 and SHA224 in bytes. -const BlockSize = 64 - -const ( - chunk = 64 - init0 = 0x6A09E667 - init1 = 0xBB67AE85 - init2 = 0x3C6EF372 - init3 = 0xA54FF53A - init4 = 0x510E527F - init5 = 0x9B05688C - init6 = 0x1F83D9AB - init7 = 0x5BE0CD19 - init0_224 = 0xC1059ED8 - init1_224 = 0x367CD507 - init2_224 = 0x3070DD17 - init3_224 = 0xF70E5939 - init4_224 = 0xFFC00B31 - init5_224 = 0x68581511 - init6_224 = 0x64F98FA7 - init7_224 = 0xBEFA4FA4 -) - -// digest represents the partial evaluation of a checksum. 
-type digest struct { - h [8]uint32 - x [chunk]byte - nx int - len uint64 - is224 bool // mark if this digest is SHA-224 -} - -func (d *digest) Reset() { - if !d.is224 { - d.h[0] = init0 - d.h[1] = init1 - d.h[2] = init2 - d.h[3] = init3 - d.h[4] = init4 - d.h[5] = init5 - d.h[6] = init6 - d.h[7] = init7 - } else { - d.h[0] = init0_224 - d.h[1] = init1_224 - d.h[2] = init2_224 - d.h[3] = init3_224 - d.h[4] = init4_224 - d.h[5] = init5_224 - d.h[6] = init6_224 - d.h[7] = init7_224 - } - d.nx = 0 - d.len = 0 -} - -// New returns a new hash.Hash computing the SHA256 checksum. -func New() hash.Hash { - d := new(digest) - d.Reset() - return d -} - -// New224 returns a new hash.Hash computing the SHA224 checksum. -func New224() hash.Hash { - d := new(digest) - d.is224 = true - d.Reset() - return d -} - -func (d *digest) Size() int { - if !d.is224 { - return Size - } - return Size224 -} - -func (d *digest) BlockSize() int { return BlockSize } - -func (d *digest) Write(p []byte) (nn int, err error) { - nn = len(p) - d.len += uint64(nn) - if d.nx > 0 { - n := copy(d.x[d.nx:], p) - d.nx += n - if d.nx == chunk { - block(d, d.x[:]) - d.nx = 0 - } - p = p[n:] - } - if len(p) >= chunk { - n := len(p) &^ (chunk - 1) - block(d, p[:n]) - p = p[n:] - } - if len(p) > 0 { - d.nx = copy(d.x[:], p) - } - return -} - -func (d0 *digest) Sum(in []byte) []byte { - // Make a copy of d0 so that caller can keep writing and summing. - d := *d0 - hash := d.checkSum() - if d.is224 { - return append(in, hash[:Size224]...) - } - return append(in, hash[:]...) -} - -func (d *digest) checkSum() [Size]byte { - len := d.len - // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64. - var tmp [64]byte - tmp[0] = 0x80 - if len%64 < 56 { - d.Write(tmp[0 : 56-len%64]) - } else { - d.Write(tmp[0 : 64+56-len%64]) - } - - // Length in bits. - len <<= 3 - for i := uint(0); i < 8; i++ { - tmp[i] = byte(len >> (56 - 8*i)) - } - d.Write(tmp[0:8]) - - if d.nx != 0 { - panic("d.nx != 0") - } - - h := d.h[:] - if d.is224 { - h = d.h[:7] - } - - var digest [Size]byte - for i, s := range h { - digest[i*4] = byte(s >> 24) - digest[i*4+1] = byte(s >> 16) - digest[i*4+2] = byte(s >> 8) - digest[i*4+3] = byte(s) - } - - return digest -} - -// Sum256 returns the SHA256 checksum of the data. -func Sum256(data []byte) [Size]byte { - var d digest - d.Reset() - d.Write(data) - return d.checkSum() -} - -// Sum224 returns the SHA224 checksum of the data. -func Sum224(data []byte) (sum224 [Size224]byte) { - var d digest - d.is224 = true - d.Reset() - d.Write(data) - sum := d.checkSum() - copy(sum224[:], sum[:Size224]) - return -} diff --git a/vendor/github.com/stevvooe/resumable/sha256/sha256block.go b/vendor/github.com/stevvooe/resumable/sha256/sha256block.go deleted file mode 100644 index d43bbf02..00000000 --- a/vendor/github.com/stevvooe/resumable/sha256/sha256block.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// SHA256 block step. -// In its own file so that a faster assembly or C version -// can be substituted easily. 
- -package sha256 - -var _K = []uint32{ - 0x428a2f98, - 0x71374491, - 0xb5c0fbcf, - 0xe9b5dba5, - 0x3956c25b, - 0x59f111f1, - 0x923f82a4, - 0xab1c5ed5, - 0xd807aa98, - 0x12835b01, - 0x243185be, - 0x550c7dc3, - 0x72be5d74, - 0x80deb1fe, - 0x9bdc06a7, - 0xc19bf174, - 0xe49b69c1, - 0xefbe4786, - 0x0fc19dc6, - 0x240ca1cc, - 0x2de92c6f, - 0x4a7484aa, - 0x5cb0a9dc, - 0x76f988da, - 0x983e5152, - 0xa831c66d, - 0xb00327c8, - 0xbf597fc7, - 0xc6e00bf3, - 0xd5a79147, - 0x06ca6351, - 0x14292967, - 0x27b70a85, - 0x2e1b2138, - 0x4d2c6dfc, - 0x53380d13, - 0x650a7354, - 0x766a0abb, - 0x81c2c92e, - 0x92722c85, - 0xa2bfe8a1, - 0xa81a664b, - 0xc24b8b70, - 0xc76c51a3, - 0xd192e819, - 0xd6990624, - 0xf40e3585, - 0x106aa070, - 0x19a4c116, - 0x1e376c08, - 0x2748774c, - 0x34b0bcb5, - 0x391c0cb3, - 0x4ed8aa4a, - 0x5b9cca4f, - 0x682e6ff3, - 0x748f82ee, - 0x78a5636f, - 0x84c87814, - 0x8cc70208, - 0x90befffa, - 0xa4506ceb, - 0xbef9a3f7, - 0xc67178f2, -} - -func blockGeneric(dig *digest, p []byte) { - var w [64]uint32 - h0, h1, h2, h3, h4, h5, h6, h7 := dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] - for len(p) >= chunk { - // Can interlace the computation of w with the - // rounds below if needed for speed. - for i := 0; i < 16; i++ { - j := i * 4 - w[i] = uint32(p[j])<<24 | uint32(p[j+1])<<16 | uint32(p[j+2])<<8 | uint32(p[j+3]) - } - for i := 16; i < 64; i++ { - v1 := w[i-2] - t1 := (v1>>17 | v1<<(32-17)) ^ (v1>>19 | v1<<(32-19)) ^ (v1 >> 10) - v2 := w[i-15] - t2 := (v2>>7 | v2<<(32-7)) ^ (v2>>18 | v2<<(32-18)) ^ (v2 >> 3) - w[i] = t1 + w[i-7] + t2 + w[i-16] - } - - a, b, c, d, e, f, g, h := h0, h1, h2, h3, h4, h5, h6, h7 - - for i := 0; i < 64; i++ { - t1 := h + ((e>>6 | e<<(32-6)) ^ (e>>11 | e<<(32-11)) ^ (e>>25 | e<<(32-25))) + ((e & f) ^ (^e & g)) + _K[i] + w[i] - - t2 := ((a>>2 | a<<(32-2)) ^ (a>>13 | a<<(32-13)) ^ (a>>22 | a<<(32-22))) + ((a & b) ^ (a & c) ^ (b & c)) - - h = g - g = f - f = e - e = d + t1 - d = c - c = b - b = a - a = t1 + t2 - } - - h0 += a - h1 += b - h2 += c - h3 += d - h4 += e - h5 += f - h6 += g - h7 += h - - p = p[chunk:] - } - - dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h0, h1, h2, h3, h4, h5, h6, h7 -} diff --git a/vendor/github.com/stevvooe/resumable/sha256/sha256block_386.s b/vendor/github.com/stevvooe/resumable/sha256/sha256block_386.s deleted file mode 100644 index 33ed027e..00000000 --- a/vendor/github.com/stevvooe/resumable/sha256/sha256block_386.s +++ /dev/null @@ -1,283 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// SHA256 block routine. See sha256block.go for Go equivalent. 
-// -// The algorithm is detailed in FIPS 180-4: -// -// http://csrc.nist.gov/publications/fips/fips180-4/fips-180-4.pdf -// -// Wt = Mt; for 0 <= t <= 15 -// Wt = SIGMA1(Wt-2) + SIGMA0(Wt-15) + Wt-16; for 16 <= t <= 63 -// -// a = H0 -// b = H1 -// c = H2 -// d = H3 -// e = H4 -// f = H5 -// g = H6 -// h = H7 -// -// for t = 0 to 63 { -// T1 = h + BIGSIGMA1(e) + Ch(e,f,g) + Kt + Wt -// T2 = BIGSIGMA0(a) + Maj(a,b,c) -// h = g -// g = f -// f = e -// e = d + T1 -// d = c -// c = b -// b = a -// a = T1 + T2 -// } -// -// H0 = a + H0 -// H1 = b + H1 -// H2 = c + H2 -// H3 = d + H3 -// H4 = e + H4 -// H5 = f + H5 -// H6 = g + H6 -// H7 = h + H7 - -// Wt = Mt; for 0 <= t <= 15 -#define MSGSCHEDULE0(index) \ - MOVL (index*4)(SI), AX; \ - BSWAPL AX; \ - MOVL AX, (index*4)(BP) - -// Wt = SIGMA1(Wt-2) + Wt-7 + SIGMA0(Wt-15) + Wt-16; for 16 <= t <= 63 -// SIGMA0(x) = ROTR(7,x) XOR ROTR(18,x) XOR SHR(3,x) -// SIGMA1(x) = ROTR(17,x) XOR ROTR(19,x) XOR SHR(10,x) -#define MSGSCHEDULE1(index) \ - MOVL ((index-2)*4)(BP), AX; \ - MOVL AX, CX; \ - RORL $17, AX; \ - MOVL CX, DX; \ - RORL $19, CX; \ - SHRL $10, DX; \ - MOVL ((index-15)*4)(BP), BX; \ - XORL CX, AX; \ - MOVL BX, CX; \ - XORL DX, AX; \ - RORL $7, BX; \ - MOVL CX, DX; \ - SHRL $3, DX; \ - RORL $18, CX; \ - ADDL ((index-7)*4)(BP), AX; \ - XORL CX, BX; \ - XORL DX, BX; \ - ADDL ((index-16)*4)(BP), BX; \ - ADDL BX, AX; \ - MOVL AX, ((index)*4)(BP) - -// Calculate T1 in AX - uses AX, BX, CX and DX registers. -// Wt is passed in AX. -// T1 = h + BIGSIGMA1(e) + Ch(e, f, g) + Kt + Wt -// BIGSIGMA1(x) = ROTR(6,x) XOR ROTR(11,x) XOR ROTR(25,x) -// Ch(x, y, z) = (x AND y) XOR (NOT x AND z) -#define SHA256T1(const, e, f, g, h) \ - MOVL (h*4)(DI), BX; \ - ADDL AX, BX; \ - MOVL (e*4)(DI), AX; \ - ADDL $const, BX; \ - MOVL (e*4)(DI), CX; \ - RORL $6, AX; \ - MOVL (e*4)(DI), DX; \ - RORL $11, CX; \ - XORL CX, AX; \ - MOVL (e*4)(DI), CX; \ - RORL $25, DX; \ - ANDL (f*4)(DI), CX; \ - XORL AX, DX; \ - MOVL (e*4)(DI), AX; \ - NOTL AX; \ - ADDL DX, BX; \ - ANDL (g*4)(DI), AX; \ - XORL CX, AX; \ - ADDL BX, AX - -// Calculate T2 in BX - uses AX, BX, CX and DX registers. -// T2 = BIGSIGMA0(a) + Maj(a, b, c) -// BIGSIGMA0(x) = ROTR(2,x) XOR ROTR(13,x) XOR ROTR(22,x) -// Maj(x, y, z) = (x AND y) XOR (x AND z) XOR (y AND z) -#define SHA256T2(a, b, c) \ - MOVL (a*4)(DI), AX; \ - MOVL (c*4)(DI), BX; \ - RORL $2, AX; \ - MOVL (a*4)(DI), DX; \ - ANDL (b*4)(DI), BX; \ - RORL $13, DX; \ - MOVL (a*4)(DI), CX; \ - ANDL (c*4)(DI), CX; \ - XORL DX, AX; \ - XORL CX, BX; \ - MOVL (a*4)(DI), DX; \ - MOVL (b*4)(DI), CX; \ - RORL $22, DX; \ - ANDL (a*4)(DI), CX; \ - XORL CX, BX; \ - XORL DX, AX; \ - ADDL AX, BX - -// Calculate T1 and T2, then e = d + T1 and a = T1 + T2. -// The values for e and a are stored in d and h, ready for rotation. 
-#define SHA256ROUND(index, const, a, b, c, d, e, f, g, h) \ - SHA256T1(const, e, f, g, h); \ - MOVL AX, 292(SP); \ - SHA256T2(a, b, c); \ - MOVL 292(SP), AX; \ - ADDL AX, BX; \ - ADDL AX, (d*4)(DI); \ - MOVL BX, (h*4)(DI) - -#define SHA256ROUND0(index, const, a, b, c, d, e, f, g, h) \ - MSGSCHEDULE0(index); \ - SHA256ROUND(index, const, a, b, c, d, e, f, g, h) - -#define SHA256ROUND1(index, const, a, b, c, d, e, f, g, h) \ - MSGSCHEDULE1(index); \ - SHA256ROUND(index, const, a, b, c, d, e, f, g, h) - -TEXT ·block(SB),0,$296-16 - MOVL p_base+4(FP), SI - MOVL p_len+8(FP), DX - SHRL $6, DX - SHLL $6, DX - - LEAL (SI)(DX*1), DI - MOVL DI, 288(SP) - CMPL SI, DI - JEQ end - - LEAL 256(SP), DI // variables - - MOVL dig+0(FP), BP - MOVL (0*4)(BP), AX // a = H0 - MOVL AX, (0*4)(DI) - MOVL (1*4)(BP), BX // b = H1 - MOVL BX, (1*4)(DI) - MOVL (2*4)(BP), CX // c = H2 - MOVL CX, (2*4)(DI) - MOVL (3*4)(BP), DX // d = H3 - MOVL DX, (3*4)(DI) - MOVL (4*4)(BP), AX // e = H4 - MOVL AX, (4*4)(DI) - MOVL (5*4)(BP), BX // f = H5 - MOVL BX, (5*4)(DI) - MOVL (6*4)(BP), CX // g = H6 - MOVL CX, (6*4)(DI) - MOVL (7*4)(BP), DX // h = H7 - MOVL DX, (7*4)(DI) - -loop: - MOVL SP, BP // message schedule - - SHA256ROUND0(0, 0x428a2f98, 0, 1, 2, 3, 4, 5, 6, 7) - SHA256ROUND0(1, 0x71374491, 7, 0, 1, 2, 3, 4, 5, 6) - SHA256ROUND0(2, 0xb5c0fbcf, 6, 7, 0, 1, 2, 3, 4, 5) - SHA256ROUND0(3, 0xe9b5dba5, 5, 6, 7, 0, 1, 2, 3, 4) - SHA256ROUND0(4, 0x3956c25b, 4, 5, 6, 7, 0, 1, 2, 3) - SHA256ROUND0(5, 0x59f111f1, 3, 4, 5, 6, 7, 0, 1, 2) - SHA256ROUND0(6, 0x923f82a4, 2, 3, 4, 5, 6, 7, 0, 1) - SHA256ROUND0(7, 0xab1c5ed5, 1, 2, 3, 4, 5, 6, 7, 0) - SHA256ROUND0(8, 0xd807aa98, 0, 1, 2, 3, 4, 5, 6, 7) - SHA256ROUND0(9, 0x12835b01, 7, 0, 1, 2, 3, 4, 5, 6) - SHA256ROUND0(10, 0x243185be, 6, 7, 0, 1, 2, 3, 4, 5) - SHA256ROUND0(11, 0x550c7dc3, 5, 6, 7, 0, 1, 2, 3, 4) - SHA256ROUND0(12, 0x72be5d74, 4, 5, 6, 7, 0, 1, 2, 3) - SHA256ROUND0(13, 0x80deb1fe, 3, 4, 5, 6, 7, 0, 1, 2) - SHA256ROUND0(14, 0x9bdc06a7, 2, 3, 4, 5, 6, 7, 0, 1) - SHA256ROUND0(15, 0xc19bf174, 1, 2, 3, 4, 5, 6, 7, 0) - - SHA256ROUND1(16, 0xe49b69c1, 0, 1, 2, 3, 4, 5, 6, 7) - SHA256ROUND1(17, 0xefbe4786, 7, 0, 1, 2, 3, 4, 5, 6) - SHA256ROUND1(18, 0x0fc19dc6, 6, 7, 0, 1, 2, 3, 4, 5) - SHA256ROUND1(19, 0x240ca1cc, 5, 6, 7, 0, 1, 2, 3, 4) - SHA256ROUND1(20, 0x2de92c6f, 4, 5, 6, 7, 0, 1, 2, 3) - SHA256ROUND1(21, 0x4a7484aa, 3, 4, 5, 6, 7, 0, 1, 2) - SHA256ROUND1(22, 0x5cb0a9dc, 2, 3, 4, 5, 6, 7, 0, 1) - SHA256ROUND1(23, 0x76f988da, 1, 2, 3, 4, 5, 6, 7, 0) - SHA256ROUND1(24, 0x983e5152, 0, 1, 2, 3, 4, 5, 6, 7) - SHA256ROUND1(25, 0xa831c66d, 7, 0, 1, 2, 3, 4, 5, 6) - SHA256ROUND1(26, 0xb00327c8, 6, 7, 0, 1, 2, 3, 4, 5) - SHA256ROUND1(27, 0xbf597fc7, 5, 6, 7, 0, 1, 2, 3, 4) - SHA256ROUND1(28, 0xc6e00bf3, 4, 5, 6, 7, 0, 1, 2, 3) - SHA256ROUND1(29, 0xd5a79147, 3, 4, 5, 6, 7, 0, 1, 2) - SHA256ROUND1(30, 0x06ca6351, 2, 3, 4, 5, 6, 7, 0, 1) - SHA256ROUND1(31, 0x14292967, 1, 2, 3, 4, 5, 6, 7, 0) - SHA256ROUND1(32, 0x27b70a85, 0, 1, 2, 3, 4, 5, 6, 7) - SHA256ROUND1(33, 0x2e1b2138, 7, 0, 1, 2, 3, 4, 5, 6) - SHA256ROUND1(34, 0x4d2c6dfc, 6, 7, 0, 1, 2, 3, 4, 5) - SHA256ROUND1(35, 0x53380d13, 5, 6, 7, 0, 1, 2, 3, 4) - SHA256ROUND1(36, 0x650a7354, 4, 5, 6, 7, 0, 1, 2, 3) - SHA256ROUND1(37, 0x766a0abb, 3, 4, 5, 6, 7, 0, 1, 2) - SHA256ROUND1(38, 0x81c2c92e, 2, 3, 4, 5, 6, 7, 0, 1) - SHA256ROUND1(39, 0x92722c85, 1, 2, 3, 4, 5, 6, 7, 0) - SHA256ROUND1(40, 0xa2bfe8a1, 0, 1, 2, 3, 4, 5, 6, 7) - SHA256ROUND1(41, 0xa81a664b, 7, 0, 1, 2, 3, 4, 5, 6) - SHA256ROUND1(42, 0xc24b8b70, 6, 7, 0, 1, 2, 3, 4, 5) - 
SHA256ROUND1(43, 0xc76c51a3, 5, 6, 7, 0, 1, 2, 3, 4) - SHA256ROUND1(44, 0xd192e819, 4, 5, 6, 7, 0, 1, 2, 3) - SHA256ROUND1(45, 0xd6990624, 3, 4, 5, 6, 7, 0, 1, 2) - SHA256ROUND1(46, 0xf40e3585, 2, 3, 4, 5, 6, 7, 0, 1) - SHA256ROUND1(47, 0x106aa070, 1, 2, 3, 4, 5, 6, 7, 0) - SHA256ROUND1(48, 0x19a4c116, 0, 1, 2, 3, 4, 5, 6, 7) - SHA256ROUND1(49, 0x1e376c08, 7, 0, 1, 2, 3, 4, 5, 6) - SHA256ROUND1(50, 0x2748774c, 6, 7, 0, 1, 2, 3, 4, 5) - SHA256ROUND1(51, 0x34b0bcb5, 5, 6, 7, 0, 1, 2, 3, 4) - SHA256ROUND1(52, 0x391c0cb3, 4, 5, 6, 7, 0, 1, 2, 3) - SHA256ROUND1(53, 0x4ed8aa4a, 3, 4, 5, 6, 7, 0, 1, 2) - SHA256ROUND1(54, 0x5b9cca4f, 2, 3, 4, 5, 6, 7, 0, 1) - SHA256ROUND1(55, 0x682e6ff3, 1, 2, 3, 4, 5, 6, 7, 0) - SHA256ROUND1(56, 0x748f82ee, 0, 1, 2, 3, 4, 5, 6, 7) - SHA256ROUND1(57, 0x78a5636f, 7, 0, 1, 2, 3, 4, 5, 6) - SHA256ROUND1(58, 0x84c87814, 6, 7, 0, 1, 2, 3, 4, 5) - SHA256ROUND1(59, 0x8cc70208, 5, 6, 7, 0, 1, 2, 3, 4) - SHA256ROUND1(60, 0x90befffa, 4, 5, 6, 7, 0, 1, 2, 3) - SHA256ROUND1(61, 0xa4506ceb, 3, 4, 5, 6, 7, 0, 1, 2) - SHA256ROUND1(62, 0xbef9a3f7, 2, 3, 4, 5, 6, 7, 0, 1) - SHA256ROUND1(63, 0xc67178f2, 1, 2, 3, 4, 5, 6, 7, 0) - - MOVL dig+0(FP), BP - MOVL (0*4)(BP), AX // H0 = a + H0 - ADDL (0*4)(DI), AX - MOVL AX, (0*4)(DI) - MOVL AX, (0*4)(BP) - MOVL (1*4)(BP), BX // H1 = b + H1 - ADDL (1*4)(DI), BX - MOVL BX, (1*4)(DI) - MOVL BX, (1*4)(BP) - MOVL (2*4)(BP), CX // H2 = c + H2 - ADDL (2*4)(DI), CX - MOVL CX, (2*4)(DI) - MOVL CX, (2*4)(BP) - MOVL (3*4)(BP), DX // H3 = d + H3 - ADDL (3*4)(DI), DX - MOVL DX, (3*4)(DI) - MOVL DX, (3*4)(BP) - MOVL (4*4)(BP), AX // H4 = e + H4 - ADDL (4*4)(DI), AX - MOVL AX, (4*4)(DI) - MOVL AX, (4*4)(BP) - MOVL (5*4)(BP), BX // H5 = f + H5 - ADDL (5*4)(DI), BX - MOVL BX, (5*4)(DI) - MOVL BX, (5*4)(BP) - MOVL (6*4)(BP), CX // H6 = g + H6 - ADDL (6*4)(DI), CX - MOVL CX, (6*4)(DI) - MOVL CX, (6*4)(BP) - MOVL (7*4)(BP), DX // H7 = h + H7 - ADDL (7*4)(DI), DX - MOVL DX, (7*4)(DI) - MOVL DX, (7*4)(BP) - - ADDL $64, SI - CMPL SI, 288(SP) - JB loop - -end: - RET diff --git a/vendor/github.com/stevvooe/resumable/sha256/sha256block_amd64.s b/vendor/github.com/stevvooe/resumable/sha256/sha256block_amd64.s deleted file mode 100644 index e9705b94..00000000 --- a/vendor/github.com/stevvooe/resumable/sha256/sha256block_amd64.s +++ /dev/null @@ -1,1044 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -#include "textflag.h" - -// SHA256 block routine. See sha256block.go for Go equivalent. -// -// The algorithm is detailed in FIPS 180-4: -// -// http://csrc.nist.gov/publications/fips/fips180-4/fips-180-4.pdf - -// The avx2-version is described in an Intel White-Paper: -// "Fast SHA-256 Implementations on Intel Architecture Processors" -// To find it, surf to http://www.intel.com/p/en_US/embedded -// and search for that title. 
-// AVX2 version by Intel, same algorithm as code in Linux kernel: -// https://github.com/torvalds/linux/blob/master/arch/x86/crypto/sha256-avx2-asm.S -// by -// James Guilford -// Kirk Yap -// Tim Chen - -// Wt = Mt; for 0 <= t <= 15 -// Wt = SIGMA1(Wt-2) + SIGMA0(Wt-15) + Wt-16; for 16 <= t <= 63 -// -// a = H0 -// b = H1 -// c = H2 -// d = H3 -// e = H4 -// f = H5 -// g = H6 -// h = H7 -// -// for t = 0 to 63 { -// T1 = h + BIGSIGMA1(e) + Ch(e,f,g) + Kt + Wt -// T2 = BIGSIGMA0(a) + Maj(a,b,c) -// h = g -// g = f -// f = e -// e = d + T1 -// d = c -// c = b -// b = a -// a = T1 + T2 -// } -// -// H0 = a + H0 -// H1 = b + H1 -// H2 = c + H2 -// H3 = d + H3 -// H4 = e + H4 -// H5 = f + H5 -// H6 = g + H6 -// H7 = h + H7 - -// Wt = Mt; for 0 <= t <= 15 -#define MSGSCHEDULE0(index) \ - MOVL (index*4)(SI), AX; \ - BSWAPL AX; \ - MOVL AX, (index*4)(BP) - -// Wt = SIGMA1(Wt-2) + Wt-7 + SIGMA0(Wt-15) + Wt-16; for 16 <= t <= 63 -// SIGMA0(x) = ROTR(7,x) XOR ROTR(18,x) XOR SHR(3,x) -// SIGMA1(x) = ROTR(17,x) XOR ROTR(19,x) XOR SHR(10,x) -#define MSGSCHEDULE1(index) \ - MOVL ((index-2)*4)(BP), AX; \ - MOVL AX, CX; \ - RORL $17, AX; \ - MOVL CX, DX; \ - RORL $19, CX; \ - SHRL $10, DX; \ - MOVL ((index-15)*4)(BP), BX; \ - XORL CX, AX; \ - MOVL BX, CX; \ - XORL DX, AX; \ - RORL $7, BX; \ - MOVL CX, DX; \ - SHRL $3, DX; \ - RORL $18, CX; \ - ADDL ((index-7)*4)(BP), AX; \ - XORL CX, BX; \ - XORL DX, BX; \ - ADDL ((index-16)*4)(BP), BX; \ - ADDL BX, AX; \ - MOVL AX, ((index)*4)(BP) - -// Calculate T1 in AX - uses AX, CX and DX registers. -// h is also used as an accumulator. Wt is passed in AX. -// T1 = h + BIGSIGMA1(e) + Ch(e, f, g) + Kt + Wt -// BIGSIGMA1(x) = ROTR(6,x) XOR ROTR(11,x) XOR ROTR(25,x) -// Ch(x, y, z) = (x AND y) XOR (NOT x AND z) -#define SHA256T1(const, e, f, g, h) \ - ADDL AX, h; \ - MOVL e, AX; \ - ADDL $const, h; \ - MOVL e, CX; \ - RORL $6, AX; \ - MOVL e, DX; \ - RORL $11, CX; \ - XORL CX, AX; \ - MOVL e, CX; \ - RORL $25, DX; \ - ANDL f, CX; \ - XORL AX, DX; \ - MOVL e, AX; \ - NOTL AX; \ - ADDL DX, h; \ - ANDL g, AX; \ - XORL CX, AX; \ - ADDL h, AX - -// Calculate T2 in BX - uses BX, CX, DX and DI registers. -// T2 = BIGSIGMA0(a) + Maj(a, b, c) -// BIGSIGMA0(x) = ROTR(2,x) XOR ROTR(13,x) XOR ROTR(22,x) -// Maj(x, y, z) = (x AND y) XOR (x AND z) XOR (y AND z) -#define SHA256T2(a, b, c) \ - MOVL a, DI; \ - MOVL c, BX; \ - RORL $2, DI; \ - MOVL a, DX; \ - ANDL b, BX; \ - RORL $13, DX; \ - MOVL a, CX; \ - ANDL c, CX; \ - XORL DX, DI; \ - XORL CX, BX; \ - MOVL a, DX; \ - MOVL b, CX; \ - RORL $22, DX; \ - ANDL a, CX; \ - XORL CX, BX; \ - XORL DX, DI; \ - ADDL DI, BX - -// Calculate T1 and T2, then e = d + T1 and a = T1 + T2. -// The values for e and a are stored in d and h, ready for rotation. 
-#define SHA256ROUND(index, const, a, b, c, d, e, f, g, h) \ - SHA256T1(const, e, f, g, h); \ - SHA256T2(a, b, c); \ - MOVL BX, h; \ - ADDL AX, d; \ - ADDL AX, h - -#define SHA256ROUND0(index, const, a, b, c, d, e, f, g, h) \ - MSGSCHEDULE0(index); \ - SHA256ROUND(index, const, a, b, c, d, e, f, g, h) - -#define SHA256ROUND1(index, const, a, b, c, d, e, f, g, h) \ - MSGSCHEDULE1(index); \ - SHA256ROUND(index, const, a, b, c, d, e, f, g, h) - - -// Definitions for AVX2 version - -// addm (mem), reg -// Add reg to mem using reg-mem add and store -#define addm(P1, P2) \ - ADDL P2, P1; \ - MOVL P1, P2 - -#define XDWORD0 Y4 -#define XDWORD1 Y5 -#define XDWORD2 Y6 -#define XDWORD3 Y7 - -#define XWORD0 X4 -#define XWORD1 X5 -#define XWORD2 X6 -#define XWORD3 X7 - -#define XTMP0 Y0 -#define XTMP1 Y1 -#define XTMP2 Y2 -#define XTMP3 Y3 -#define XTMP4 Y8 -#define XTMP5 Y11 - -#define XFER Y9 - -#define BYTE_FLIP_MASK Y13 // mask to convert LE -> BE -#define X_BYTE_FLIP_MASK X13 - -#define NUM_BYTES DX -#define INP DI - -#define CTX SI // Beginning of digest in memory (a, b, c, ... , h) - -#define a AX -#define b BX -#define c CX -#define d R8 -#define e DX -#define f R9 -#define g R10 -#define h R11 - -#define old_h R11 - -#define TBL BP - -#define SRND SI // SRND is same register as CTX - -#define T1 R12 - -#define y0 R13 -#define y1 R14 -#define y2 R15 -#define y3 DI - -// Offsets -#define XFER_SIZE 2*64*4 -#define INP_END_SIZE 8 -#define INP_SIZE 8 -#define TMP_SIZE 4 - -#define _XFER 0 -#define _INP_END _XFER + XFER_SIZE -#define _INP _INP_END + INP_END_SIZE -#define _TMP _INP + INP_SIZE -#define STACK_SIZE _TMP + TMP_SIZE - -#define ROUND_AND_SCHED_N_0(disp, a, b, c, d, e, f, g, h, XDWORD0, XDWORD1, XDWORD2, XDWORD3) \ - ; \ // ############################# RND N + 0 ############################// - MOVL a, y3; \ // y3 = a // MAJA - RORXL $25, e, y0; \ // y0 = e >> 25 // S1A - RORXL $11, e, y1; \ // y1 = e >> 11 // S1B - ; \ - ADDL (disp + 0*4)(SP)(SRND*1), h; \ // h = k + w + h // disp = k + w - ORL c, y3; \ // y3 = a|c // MAJA - VPALIGNR $4, XDWORD2, XDWORD3, XTMP0; \ // XTMP0 = W[-7] - MOVL f, y2; \ // y2 = f // CH - RORXL $13, a, T1; \ // T1 = a >> 13 // S0B - ; \ - XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) // S1 - XORL g, y2; \ // y2 = f^g // CH - VPADDD XDWORD0, XTMP0, XTMP0; \ // XTMP0 = W[-7] + W[-16] // y1 = (e >> 6) // S1 - RORXL $6, e, y1; \ // y1 = (e >> 6) // S1 - ; \ - ANDL e, y2; \ // y2 = (f^g)&e // CH - XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) ^ (e>>6) // S1 - RORXL $22, a, y1; \ // y1 = a >> 22 // S0A - ADDL h, d; \ // d = k + w + h + d // -- - ; \ - ANDL b, y3; \ // y3 = (a|c)&b // MAJA - VPALIGNR $4, XDWORD0, XDWORD1, XTMP1; \ // XTMP1 = W[-15] - XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) // S0 - RORXL $2, a, T1; \ // T1 = (a >> 2) // S0 - ; \ - XORL g, y2; \ // y2 = CH = ((f^g)&e)^g // CH - VPSRLD $7, XTMP1, XTMP2; \ - XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) ^ (a>>2) // S0 - MOVL a, T1; \ // T1 = a // MAJB - ANDL c, T1; \ // T1 = a&c // MAJB - ; \ - ADDL y0, y2; \ // y2 = S1 + CH // -- - VPSLLD $(32-7), XTMP1, XTMP3; \ - ORL T1, y3; \ // y3 = MAJ = (a|c)&b)|(a&c) // MAJ - ADDL y1, h; \ // h = k + w + h + S0 // -- - ; \ - ADDL y2, d; \ // d = k + w + h + d + S1 + CH = d + t1 // -- - VPOR XTMP2, XTMP3, XTMP3; \ // XTMP3 = W[-15] ror 7 - ; \ - VPSRLD $18, XTMP1, XTMP2; \ - ADDL y2, h; \ // h = k + w + h + S0 + S1 + CH = t1 + S0// -- - ADDL y3, h // h = t1 + S0 + MAJ // -- - -#define ROUND_AND_SCHED_N_1(disp, a, b, c, d, e, f, g, h, XDWORD0, XDWORD1, XDWORD2, XDWORD3) \ - ; \ // 
################################### RND N + 1 ############################ - ; \ - MOVL a, y3; \ // y3 = a // MAJA - RORXL $25, e, y0; \ // y0 = e >> 25 // S1A - RORXL $11, e, y1; \ // y1 = e >> 11 // S1B - ADDL (disp + 1*4)(SP)(SRND*1), h; \ // h = k + w + h // -- - ORL c, y3; \ // y3 = a|c // MAJA - ; \ - VPSRLD $3, XTMP1, XTMP4; \ // XTMP4 = W[-15] >> 3 - MOVL f, y2; \ // y2 = f // CH - RORXL $13, a, T1; \ // T1 = a >> 13 // S0B - XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) // S1 - XORL g, y2; \ // y2 = f^g // CH - ; \ - RORXL $6, e, y1; \ // y1 = (e >> 6) // S1 - XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) ^ (e>>6) // S1 - RORXL $22, a, y1; \ // y1 = a >> 22 // S0A - ANDL e, y2; \ // y2 = (f^g)&e // CH - ADDL h, d; \ // d = k + w + h + d // -- - ; \ - VPSLLD $(32-18), XTMP1, XTMP1; \ - ANDL b, y3; \ // y3 = (a|c)&b // MAJA - XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) // S0 - ; \ - VPXOR XTMP1, XTMP3, XTMP3; \ - RORXL $2, a, T1; \ // T1 = (a >> 2) // S0 - XORL g, y2; \ // y2 = CH = ((f^g)&e)^g // CH - ; \ - VPXOR XTMP2, XTMP3, XTMP3; \ // XTMP3 = W[-15] ror 7 ^ W[-15] ror 18 - XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) ^ (a>>2) // S0 - MOVL a, T1; \ // T1 = a // MAJB - ANDL c, T1; \ // T1 = a&c // MAJB - ADDL y0, y2; \ // y2 = S1 + CH // -- - ; \ - VPXOR XTMP4, XTMP3, XTMP1; \ // XTMP1 = s0 - VPSHUFD $0xFA, XDWORD3, XTMP2; \ // XTMP2 = W[-2] {BBAA} - ORL T1, y3; \ // y3 = MAJ = (a|c)&b)|(a&c) // MAJ - ADDL y1, h; \ // h = k + w + h + S0 // -- - ; \ - VPADDD XTMP1, XTMP0, XTMP0; \ // XTMP0 = W[-16] + W[-7] + s0 - ADDL y2, d; \ // d = k + w + h + d + S1 + CH = d + t1 // -- - ADDL y2, h; \ // h = k + w + h + S0 + S1 + CH = t1 + S0// -- - ADDL y3, h; \ // h = t1 + S0 + MAJ // -- - ; \ - VPSRLD $10, XTMP2, XTMP4 // XTMP4 = W[-2] >> 10 {BBAA} - -#define ROUND_AND_SCHED_N_2(disp, a, b, c, d, e, f, g, h, XDWORD0, XDWORD1, XDWORD2, XDWORD3) \ - ; \ // ################################### RND N + 2 ############################ - ; \ - MOVL a, y3; \ // y3 = a // MAJA - RORXL $25, e, y0; \ // y0 = e >> 25 // S1A - ADDL (disp + 2*4)(SP)(SRND*1), h; \ // h = k + w + h // -- - ; \ - VPSRLQ $19, XTMP2, XTMP3; \ // XTMP3 = W[-2] ror 19 {xBxA} - RORXL $11, e, y1; \ // y1 = e >> 11 // S1B - ORL c, y3; \ // y3 = a|c // MAJA - MOVL f, y2; \ // y2 = f // CH - XORL g, y2; \ // y2 = f^g // CH - ; \ - RORXL $13, a, T1; \ // T1 = a >> 13 // S0B - XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) // S1 - VPSRLQ $17, XTMP2, XTMP2; \ // XTMP2 = W[-2] ror 17 {xBxA} - ANDL e, y2; \ // y2 = (f^g)&e // CH - ; \ - RORXL $6, e, y1; \ // y1 = (e >> 6) // S1 - VPXOR XTMP3, XTMP2, XTMP2; \ - ADDL h, d; \ // d = k + w + h + d // -- - ANDL b, y3; \ // y3 = (a|c)&b // MAJA - ; \ - XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) ^ (e>>6) // S1 - RORXL $22, a, y1; \ // y1 = a >> 22 // S0A - VPXOR XTMP2, XTMP4, XTMP4; \ // XTMP4 = s1 {xBxA} - XORL g, y2; \ // y2 = CH = ((f^g)&e)^g // CH - ; \ - MOVL f, _TMP(SP); \ - MOVQ $shuff_00BA<>(SB), f; \ // f is used to keep SHUF_00BA - VPSHUFB (f), XTMP4, XTMP4; \ // XTMP4 = s1 {00BA} - MOVL _TMP(SP), f; \ // f is restored - ; \ - XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) // S0 - RORXL $2, a, T1; \ // T1 = (a >> 2) // S0 - VPADDD XTMP4, XTMP0, XTMP0; \ // XTMP0 = {..., ..., W[1], W[0]} - ; \ - XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) ^ (a>>2) // S0 - MOVL a, T1; \ // T1 = a // MAJB - ANDL c, T1; \ // T1 = a&c // MAJB - ADDL y0, y2; \ // y2 = S1 + CH // -- - VPSHUFD $80, XTMP0, XTMP2; \ // XTMP2 = W[-2] {DDCC} - ; \ - ORL T1, y3; \ // y3 = MAJ = (a|c)&b)|(a&c) // MAJ - ADDL y1, h; \ // h = k + w + h + S0 // -- - ADDL 
y2, d; \ // d = k + w + h + d + S1 + CH = d + t1 // -- - ADDL y2, h; \ // h = k + w + h + S0 + S1 + CH = t1 + S0// -- - ; \ - ADDL y3, h // h = t1 + S0 + MAJ // -- - -#define ROUND_AND_SCHED_N_3(disp, a, b, c, d, e, f, g, h, XDWORD0, XDWORD1, XDWORD2, XDWORD3) \ - ; \ // ################################### RND N + 3 ############################ - ; \ - MOVL a, y3; \ // y3 = a // MAJA - RORXL $25, e, y0; \ // y0 = e >> 25 // S1A - RORXL $11, e, y1; \ // y1 = e >> 11 // S1B - ADDL (disp + 3*4)(SP)(SRND*1), h; \ // h = k + w + h // -- - ORL c, y3; \ // y3 = a|c // MAJA - ; \ - VPSRLD $10, XTMP2, XTMP5; \ // XTMP5 = W[-2] >> 10 {DDCC} - MOVL f, y2; \ // y2 = f // CH - RORXL $13, a, T1; \ // T1 = a >> 13 // S0B - XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) // S1 - XORL g, y2; \ // y2 = f^g // CH - ; \ - VPSRLQ $19, XTMP2, XTMP3; \ // XTMP3 = W[-2] ror 19 {xDxC} - RORXL $6, e, y1; \ // y1 = (e >> 6) // S1 - ANDL e, y2; \ // y2 = (f^g)&e // CH - ADDL h, d; \ // d = k + w + h + d // -- - ANDL b, y3; \ // y3 = (a|c)&b // MAJA - ; \ - VPSRLQ $17, XTMP2, XTMP2; \ // XTMP2 = W[-2] ror 17 {xDxC} - XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) ^ (e>>6) // S1 - XORL g, y2; \ // y2 = CH = ((f^g)&e)^g // CH - ; \ - VPXOR XTMP3, XTMP2, XTMP2; \ - RORXL $22, a, y1; \ // y1 = a >> 22 // S0A - ADDL y0, y2; \ // y2 = S1 + CH // -- - ; \ - VPXOR XTMP2, XTMP5, XTMP5; \ // XTMP5 = s1 {xDxC} - XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) // S0 - ADDL y2, d; \ // d = k + w + h + d + S1 + CH = d + t1 // -- - ; \ - RORXL $2, a, T1; \ // T1 = (a >> 2) // S0 - ; \ - MOVL f, _TMP(SP); \ // Save f - MOVQ $shuff_DC00<>(SB), f; \ // SHUF_00DC - VPSHUFB (f), XTMP5, XTMP5; \ // XTMP5 = s1 {DC00} - MOVL _TMP(SP), f; \ // Restore f - ; \ - VPADDD XTMP0, XTMP5, XDWORD0; \ // XDWORD0 = {W[3], W[2], W[1], W[0]} - XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) ^ (a>>2) // S0 - MOVL a, T1; \ // T1 = a // MAJB - ANDL c, T1; \ // T1 = a&c // MAJB - ORL T1, y3; \ // y3 = MAJ = (a|c)&b)|(a&c) // MAJ - ; \ - ADDL y1, h; \ // h = k + w + h + S0 // -- - ADDL y2, h; \ // h = k + w + h + S0 + S1 + CH = t1 + S0// -- - ADDL y3, h // h = t1 + S0 + MAJ // -- - -#define DO_ROUND_N_0(disp, a, b, c, d, e, f, g, h, old_h) \ - ; \ // ################################### RND N + 0 ########################### - MOVL f, y2; \ // y2 = f // CH - RORXL $25, e, y0; \ // y0 = e >> 25 // S1A - RORXL $11, e, y1; \ // y1 = e >> 11 // S1B - XORL g, y2; \ // y2 = f^g // CH - ; \ - XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) // S1 - RORXL $6, e, y1; \ // y1 = (e >> 6) // S1 - ANDL e, y2; \ // y2 = (f^g)&e // CH - ; \ - XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) ^ (e>>6) // S1 - RORXL $13, a, T1; \ // T1 = a >> 13 // S0B - XORL g, y2; \ // y2 = CH = ((f^g)&e)^g // CH - RORXL $22, a, y1; \ // y1 = a >> 22 // S0A - MOVL a, y3; \ // y3 = a // MAJA - ; \ - XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) // S0 - RORXL $2, a, T1; \ // T1 = (a >> 2) // S0 - ADDL (disp + 0*4)(SP)(SRND*1), h; \ // h = k + w + h // -- - ORL c, y3; \ // y3 = a|c // MAJA - ; \ - XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) ^ (a>>2) // S0 - MOVL a, T1; \ // T1 = a // MAJB - ANDL b, y3; \ // y3 = (a|c)&b // MAJA - ANDL c, T1; \ // T1 = a&c // MAJB - ADDL y0, y2; \ // y2 = S1 + CH // -- - ; \ - ADDL h, d; \ // d = k + w + h + d // -- - ORL T1, y3; \ // y3 = MAJ = (a|c)&b)|(a&c) // MAJ - ADDL y1, h; \ // h = k + w + h + S0 // -- - ADDL y2, d // d = k + w + h + d + S1 + CH = d + t1 // -- - -#define DO_ROUND_N_1(disp, a, b, c, d, e, f, g, h, old_h) \ - ; \ // ################################### RND N + 1 ########################### - 
ADDL y2, old_h; \ // h = k + w + h + S0 + S1 + CH = t1 + S0 // -- - MOVL f, y2; \ // y2 = f // CH - RORXL $25, e, y0; \ // y0 = e >> 25 // S1A - RORXL $11, e, y1; \ // y1 = e >> 11 // S1B - XORL g, y2; \ // y2 = f^g // CH - ; \ - XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) // S1 - RORXL $6, e, y1; \ // y1 = (e >> 6) // S1 - ANDL e, y2; \ // y2 = (f^g)&e // CH - ADDL y3, old_h; \ // h = t1 + S0 + MAJ // -- - ; \ - XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) ^ (e>>6) // S1 - RORXL $13, a, T1; \ // T1 = a >> 13 // S0B - XORL g, y2; \ // y2 = CH = ((f^g)&e)^g // CH - RORXL $22, a, y1; \ // y1 = a >> 22 // S0A - MOVL a, y3; \ // y3 = a // MAJA - ; \ - XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) // S0 - RORXL $2, a, T1; \ // T1 = (a >> 2) // S0 - ADDL (disp + 1*4)(SP)(SRND*1), h; \ // h = k + w + h // -- - ORL c, y3; \ // y3 = a|c // MAJA - ; \ - XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) ^ (a>>2) // S0 - MOVL a, T1; \ // T1 = a // MAJB - ANDL b, y3; \ // y3 = (a|c)&b // MAJA - ANDL c, T1; \ // T1 = a&c // MAJB - ADDL y0, y2; \ // y2 = S1 + CH // -- - ; \ - ADDL h, d; \ // d = k + w + h + d // -- - ORL T1, y3; \ // y3 = MAJ = (a|c)&b)|(a&c) // MAJ - ADDL y1, h; \ // h = k + w + h + S0 // -- - ; \ - ADDL y2, d // d = k + w + h + d + S1 + CH = d + t1 // -- - -#define DO_ROUND_N_2(disp, a, b, c, d, e, f, g, h, old_h) \ - ; \ // ################################### RND N + 2 ############################## - ADDL y2, old_h; \ // h = k + w + h + S0 + S1 + CH = t1 + S0// -- - MOVL f, y2; \ // y2 = f // CH - RORXL $25, e, y0; \ // y0 = e >> 25 // S1A - RORXL $11, e, y1; \ // y1 = e >> 11 // S1B - XORL g, y2; \ // y2 = f^g // CH - ; \ - XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) // S1 - RORXL $6, e, y1; \ // y1 = (e >> 6) // S1 - ANDL e, y2; \ // y2 = (f^g)&e // CH - ADDL y3, old_h; \ // h = t1 + S0 + MAJ // -- - ; \ - XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) ^ (e>>6) // S1 - RORXL $13, a, T1; \ // T1 = a >> 13 // S0B - XORL g, y2; \ // y2 = CH = ((f^g)&e)^g // CH - RORXL $22, a, y1; \ // y1 = a >> 22 // S0A - MOVL a, y3; \ // y3 = a // MAJA - ; \ - XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) // S0 - RORXL $2, a, T1; \ // T1 = (a >> 2) // S0 - ADDL (disp + 2*4)(SP)(SRND*1), h; \ // h = k + w + h // -- - ORL c, y3; \ // y3 = a|c // MAJA - ; \ - XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) ^ (a>>2) // S0 - MOVL a, T1; \ // T1 = a // MAJB - ANDL b, y3; \ // y3 = (a|c)&b // MAJA - ANDL c, T1; \ // T1 = a&c // MAJB - ADDL y0, y2; \ // y2 = S1 + CH // -- - ; \ - ADDL h, d; \ // d = k + w + h + d // -- - ORL T1, y3; \ // y3 = MAJ = (a|c)&b)|(a&c) // MAJ - ADDL y1, h; \ // h = k + w + h + S0 // -- - ; \ - ADDL y2, d // d = k + w + h + d + S1 + CH = d + t1 // -- - -#define DO_ROUND_N_3(disp, a, b, c, d, e, f, g, h, old_h) \ - ; \ // ################################### RND N + 3 ########################### - ADDL y2, old_h; \ // h = k + w + h + S0 + S1 + CH = t1 + S0// -- - MOVL f, y2; \ // y2 = f // CH - RORXL $25, e, y0; \ // y0 = e >> 25 // S1A - RORXL $11, e, y1; \ // y1 = e >> 11 // S1B - XORL g, y2; \ // y2 = f^g // CH - ; \ - XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) // S1 - RORXL $6, e, y1; \ // y1 = (e >> 6) // S1 - ANDL e, y2; \ // y2 = (f^g)&e // CH - ADDL y3, old_h; \ // h = t1 + S0 + MAJ // -- - ; \ - XORL y1, y0; \ // y0 = (e>>25) ^ (e>>11) ^ (e>>6) // S1 - RORXL $13, a, T1; \ // T1 = a >> 13 // S0B - XORL g, y2; \ // y2 = CH = ((f^g)&e)^g // CH - RORXL $22, a, y1; \ // y1 = a >> 22 // S0A - MOVL a, y3; \ // y3 = a // MAJA - ; \ - XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) // S0 - RORXL $2, a, T1; \ // T1 = (a >> 2) // S0 - 
ADDL (disp + 3*4)(SP)(SRND*1), h; \ // h = k + w + h // -- - ORL c, y3; \ // y3 = a|c // MAJA - ; \ - XORL T1, y1; \ // y1 = (a>>22) ^ (a>>13) ^ (a>>2) // S0 - MOVL a, T1; \ // T1 = a // MAJB - ANDL b, y3; \ // y3 = (a|c)&b // MAJA - ANDL c, T1; \ // T1 = a&c // MAJB - ADDL y0, y2; \ // y2 = S1 + CH // -- - ; \ - ADDL h, d; \ // d = k + w + h + d // -- - ORL T1, y3; \ // y3 = MAJ = (a|c)&b)|(a&c) // MAJ - ADDL y1, h; \ // h = k + w + h + S0 // -- - ; \ - ADDL y2, d; \ // d = k + w + h + d + S1 + CH = d + t1 // -- - ; \ - ADDL y2, h; \ // h = k + w + h + S0 + S1 + CH = t1 + S0// -- - ; \ - ADDL y3, h // h = t1 + S0 + MAJ // -- - -TEXT ·block(SB), 0, $536-32 - CMPB runtime·support_avx2(SB), $0 - JE noavx2bmi2 - CMPB runtime·support_bmi2(SB), $1 // check for RORXL instruction - JE avx2 -noavx2bmi2: - - MOVQ p_base+8(FP), SI - MOVQ p_len+16(FP), DX - SHRQ $6, DX - SHLQ $6, DX - - LEAQ (SI)(DX*1), DI - MOVQ DI, 256(SP) - CMPQ SI, DI - JEQ end - - MOVQ dig+0(FP), BP - MOVL (0*4)(BP), R8 // a = H0 - MOVL (1*4)(BP), R9 // b = H1 - MOVL (2*4)(BP), R10 // c = H2 - MOVL (3*4)(BP), R11 // d = H3 - MOVL (4*4)(BP), R12 // e = H4 - MOVL (5*4)(BP), R13 // f = H5 - MOVL (6*4)(BP), R14 // g = H6 - MOVL (7*4)(BP), R15 // h = H7 - -loop: - MOVQ SP, BP - - SHA256ROUND0(0, 0x428a2f98, R8, R9, R10, R11, R12, R13, R14, R15) - SHA256ROUND0(1, 0x71374491, R15, R8, R9, R10, R11, R12, R13, R14) - SHA256ROUND0(2, 0xb5c0fbcf, R14, R15, R8, R9, R10, R11, R12, R13) - SHA256ROUND0(3, 0xe9b5dba5, R13, R14, R15, R8, R9, R10, R11, R12) - SHA256ROUND0(4, 0x3956c25b, R12, R13, R14, R15, R8, R9, R10, R11) - SHA256ROUND0(5, 0x59f111f1, R11, R12, R13, R14, R15, R8, R9, R10) - SHA256ROUND0(6, 0x923f82a4, R10, R11, R12, R13, R14, R15, R8, R9) - SHA256ROUND0(7, 0xab1c5ed5, R9, R10, R11, R12, R13, R14, R15, R8) - SHA256ROUND0(8, 0xd807aa98, R8, R9, R10, R11, R12, R13, R14, R15) - SHA256ROUND0(9, 0x12835b01, R15, R8, R9, R10, R11, R12, R13, R14) - SHA256ROUND0(10, 0x243185be, R14, R15, R8, R9, R10, R11, R12, R13) - SHA256ROUND0(11, 0x550c7dc3, R13, R14, R15, R8, R9, R10, R11, R12) - SHA256ROUND0(12, 0x72be5d74, R12, R13, R14, R15, R8, R9, R10, R11) - SHA256ROUND0(13, 0x80deb1fe, R11, R12, R13, R14, R15, R8, R9, R10) - SHA256ROUND0(14, 0x9bdc06a7, R10, R11, R12, R13, R14, R15, R8, R9) - SHA256ROUND0(15, 0xc19bf174, R9, R10, R11, R12, R13, R14, R15, R8) - - SHA256ROUND1(16, 0xe49b69c1, R8, R9, R10, R11, R12, R13, R14, R15) - SHA256ROUND1(17, 0xefbe4786, R15, R8, R9, R10, R11, R12, R13, R14) - SHA256ROUND1(18, 0x0fc19dc6, R14, R15, R8, R9, R10, R11, R12, R13) - SHA256ROUND1(19, 0x240ca1cc, R13, R14, R15, R8, R9, R10, R11, R12) - SHA256ROUND1(20, 0x2de92c6f, R12, R13, R14, R15, R8, R9, R10, R11) - SHA256ROUND1(21, 0x4a7484aa, R11, R12, R13, R14, R15, R8, R9, R10) - SHA256ROUND1(22, 0x5cb0a9dc, R10, R11, R12, R13, R14, R15, R8, R9) - SHA256ROUND1(23, 0x76f988da, R9, R10, R11, R12, R13, R14, R15, R8) - SHA256ROUND1(24, 0x983e5152, R8, R9, R10, R11, R12, R13, R14, R15) - SHA256ROUND1(25, 0xa831c66d, R15, R8, R9, R10, R11, R12, R13, R14) - SHA256ROUND1(26, 0xb00327c8, R14, R15, R8, R9, R10, R11, R12, R13) - SHA256ROUND1(27, 0xbf597fc7, R13, R14, R15, R8, R9, R10, R11, R12) - SHA256ROUND1(28, 0xc6e00bf3, R12, R13, R14, R15, R8, R9, R10, R11) - SHA256ROUND1(29, 0xd5a79147, R11, R12, R13, R14, R15, R8, R9, R10) - SHA256ROUND1(30, 0x06ca6351, R10, R11, R12, R13, R14, R15, R8, R9) - SHA256ROUND1(31, 0x14292967, R9, R10, R11, R12, R13, R14, R15, R8) - SHA256ROUND1(32, 0x27b70a85, R8, R9, R10, R11, R12, R13, R14, R15) - SHA256ROUND1(33, 0x2e1b2138, 
R15, R8, R9, R10, R11, R12, R13, R14) - SHA256ROUND1(34, 0x4d2c6dfc, R14, R15, R8, R9, R10, R11, R12, R13) - SHA256ROUND1(35, 0x53380d13, R13, R14, R15, R8, R9, R10, R11, R12) - SHA256ROUND1(36, 0x650a7354, R12, R13, R14, R15, R8, R9, R10, R11) - SHA256ROUND1(37, 0x766a0abb, R11, R12, R13, R14, R15, R8, R9, R10) - SHA256ROUND1(38, 0x81c2c92e, R10, R11, R12, R13, R14, R15, R8, R9) - SHA256ROUND1(39, 0x92722c85, R9, R10, R11, R12, R13, R14, R15, R8) - SHA256ROUND1(40, 0xa2bfe8a1, R8, R9, R10, R11, R12, R13, R14, R15) - SHA256ROUND1(41, 0xa81a664b, R15, R8, R9, R10, R11, R12, R13, R14) - SHA256ROUND1(42, 0xc24b8b70, R14, R15, R8, R9, R10, R11, R12, R13) - SHA256ROUND1(43, 0xc76c51a3, R13, R14, R15, R8, R9, R10, R11, R12) - SHA256ROUND1(44, 0xd192e819, R12, R13, R14, R15, R8, R9, R10, R11) - SHA256ROUND1(45, 0xd6990624, R11, R12, R13, R14, R15, R8, R9, R10) - SHA256ROUND1(46, 0xf40e3585, R10, R11, R12, R13, R14, R15, R8, R9) - SHA256ROUND1(47, 0x106aa070, R9, R10, R11, R12, R13, R14, R15, R8) - SHA256ROUND1(48, 0x19a4c116, R8, R9, R10, R11, R12, R13, R14, R15) - SHA256ROUND1(49, 0x1e376c08, R15, R8, R9, R10, R11, R12, R13, R14) - SHA256ROUND1(50, 0x2748774c, R14, R15, R8, R9, R10, R11, R12, R13) - SHA256ROUND1(51, 0x34b0bcb5, R13, R14, R15, R8, R9, R10, R11, R12) - SHA256ROUND1(52, 0x391c0cb3, R12, R13, R14, R15, R8, R9, R10, R11) - SHA256ROUND1(53, 0x4ed8aa4a, R11, R12, R13, R14, R15, R8, R9, R10) - SHA256ROUND1(54, 0x5b9cca4f, R10, R11, R12, R13, R14, R15, R8, R9) - SHA256ROUND1(55, 0x682e6ff3, R9, R10, R11, R12, R13, R14, R15, R8) - SHA256ROUND1(56, 0x748f82ee, R8, R9, R10, R11, R12, R13, R14, R15) - SHA256ROUND1(57, 0x78a5636f, R15, R8, R9, R10, R11, R12, R13, R14) - SHA256ROUND1(58, 0x84c87814, R14, R15, R8, R9, R10, R11, R12, R13) - SHA256ROUND1(59, 0x8cc70208, R13, R14, R15, R8, R9, R10, R11, R12) - SHA256ROUND1(60, 0x90befffa, R12, R13, R14, R15, R8, R9, R10, R11) - SHA256ROUND1(61, 0xa4506ceb, R11, R12, R13, R14, R15, R8, R9, R10) - SHA256ROUND1(62, 0xbef9a3f7, R10, R11, R12, R13, R14, R15, R8, R9) - SHA256ROUND1(63, 0xc67178f2, R9, R10, R11, R12, R13, R14, R15, R8) - - MOVQ dig+0(FP), BP - ADDL (0*4)(BP), R8 // H0 = a + H0 - MOVL R8, (0*4)(BP) - ADDL (1*4)(BP), R9 // H1 = b + H1 - MOVL R9, (1*4)(BP) - ADDL (2*4)(BP), R10 // H2 = c + H2 - MOVL R10, (2*4)(BP) - ADDL (3*4)(BP), R11 // H3 = d + H3 - MOVL R11, (3*4)(BP) - ADDL (4*4)(BP), R12 // H4 = e + H4 - MOVL R12, (4*4)(BP) - ADDL (5*4)(BP), R13 // H5 = f + H5 - MOVL R13, (5*4)(BP) - ADDL (6*4)(BP), R14 // H6 = g + H6 - MOVL R14, (6*4)(BP) - ADDL (7*4)(BP), R15 // H7 = h + H7 - MOVL R15, (7*4)(BP) - - ADDQ $64, SI - CMPQ SI, 256(SP) - JB loop - -end: - RET - -avx2: - MOVQ dig+0(FP), CTX // d.h[8] - MOVQ p_base+8(FP), INP - MOVQ p_len+16(FP), NUM_BYTES - - LEAQ -64(INP)(NUM_BYTES*1), NUM_BYTES // Pointer to the last block - MOVQ NUM_BYTES, _INP_END(SP) - - CMPQ NUM_BYTES, INP - JE avx2_only_one_block - - // Load initial digest - MOVL 0(CTX), a // a = H0 - MOVL 4(CTX), b // b = H1 - MOVL 8(CTX), c // c = H2 - MOVL 12(CTX), d // d = H3 - MOVL 16(CTX), e // e = H4 - MOVL 20(CTX), f // f = H5 - MOVL 24(CTX), g // g = H6 - MOVL 28(CTX), h // h = H7 - -avx2_loop0: // at each iteration works with one block (512 bit) - - VMOVDQU (0*32)(INP), XTMP0 - VMOVDQU (1*32)(INP), XTMP1 - VMOVDQU (2*32)(INP), XTMP2 - VMOVDQU (3*32)(INP), XTMP3 - - MOVQ $flip_mask<>(SB), BP // BYTE_FLIP_MASK - VMOVDQU (BP), BYTE_FLIP_MASK - - // Apply Byte Flip Mask: LE -> BE - VPSHUFB BYTE_FLIP_MASK, XTMP0, XTMP0 - VPSHUFB BYTE_FLIP_MASK, XTMP1, XTMP1 - VPSHUFB 
BYTE_FLIP_MASK, XTMP2, XTMP2 - VPSHUFB BYTE_FLIP_MASK, XTMP3, XTMP3 - - // Transpose data into high/low parts - VPERM2I128 $0x20, XTMP2, XTMP0, XDWORD0 // w3, w2, w1, w0 - VPERM2I128 $0x31, XTMP2, XTMP0, XDWORD1 // w7, w6, w5, w4 - VPERM2I128 $0x20, XTMP3, XTMP1, XDWORD2 // w11, w10, w9, w8 - VPERM2I128 $0x31, XTMP3, XTMP1, XDWORD3 // w15, w14, w13, w12 - - MOVQ $K256<>(SB), TBL // Loading address of table with round-specific constants - -avx2_last_block_enter: - ADDQ $64, INP - MOVQ INP, _INP(SP) - XORQ SRND, SRND - -avx2_loop1: // for w0 - w47 - // Do 4 rounds and scheduling - VPADDD 0*32(TBL)(SRND*1), XDWORD0, XFER - VMOVDQU XFER, (_XFER + 0*32)(SP)(SRND*1) - ROUND_AND_SCHED_N_0(_XFER + 0*32, a, b, c, d, e, f, g, h, XDWORD0, XDWORD1, XDWORD2, XDWORD3) - ROUND_AND_SCHED_N_1(_XFER + 0*32, h, a, b, c, d, e, f, g, XDWORD0, XDWORD1, XDWORD2, XDWORD3) - ROUND_AND_SCHED_N_2(_XFER + 0*32, g, h, a, b, c, d, e, f, XDWORD0, XDWORD1, XDWORD2, XDWORD3) - ROUND_AND_SCHED_N_3(_XFER + 0*32, f, g, h, a, b, c, d, e, XDWORD0, XDWORD1, XDWORD2, XDWORD3) - - // Do 4 rounds and scheduling - VPADDD 1*32(TBL)(SRND*1), XDWORD1, XFER - VMOVDQU XFER, (_XFER + 1*32)(SP)(SRND*1) - ROUND_AND_SCHED_N_0(_XFER + 1*32, e, f, g, h, a, b, c, d, XDWORD1, XDWORD2, XDWORD3, XDWORD0) - ROUND_AND_SCHED_N_1(_XFER + 1*32, d, e, f, g, h, a, b, c, XDWORD1, XDWORD2, XDWORD3, XDWORD0) - ROUND_AND_SCHED_N_2(_XFER + 1*32, c, d, e, f, g, h, a, b, XDWORD1, XDWORD2, XDWORD3, XDWORD0) - ROUND_AND_SCHED_N_3(_XFER + 1*32, b, c, d, e, f, g, h, a, XDWORD1, XDWORD2, XDWORD3, XDWORD0) - - // Do 4 rounds and scheduling - VPADDD 2*32(TBL)(SRND*1), XDWORD2, XFER - VMOVDQU XFER, (_XFER + 2*32)(SP)(SRND*1) - ROUND_AND_SCHED_N_0(_XFER + 2*32, a, b, c, d, e, f, g, h, XDWORD2, XDWORD3, XDWORD0, XDWORD1) - ROUND_AND_SCHED_N_1(_XFER + 2*32, h, a, b, c, d, e, f, g, XDWORD2, XDWORD3, XDWORD0, XDWORD1) - ROUND_AND_SCHED_N_2(_XFER + 2*32, g, h, a, b, c, d, e, f, XDWORD2, XDWORD3, XDWORD0, XDWORD1) - ROUND_AND_SCHED_N_3(_XFER + 2*32, f, g, h, a, b, c, d, e, XDWORD2, XDWORD3, XDWORD0, XDWORD1) - - // Do 4 rounds and scheduling - VPADDD 3*32(TBL)(SRND*1), XDWORD3, XFER - VMOVDQU XFER, (_XFER + 3*32)(SP)(SRND*1) - ROUND_AND_SCHED_N_0(_XFER + 3*32, e, f, g, h, a, b, c, d, XDWORD3, XDWORD0, XDWORD1, XDWORD2) - ROUND_AND_SCHED_N_1(_XFER + 3*32, d, e, f, g, h, a, b, c, XDWORD3, XDWORD0, XDWORD1, XDWORD2) - ROUND_AND_SCHED_N_2(_XFER + 3*32, c, d, e, f, g, h, a, b, XDWORD3, XDWORD0, XDWORD1, XDWORD2) - ROUND_AND_SCHED_N_3(_XFER + 3*32, b, c, d, e, f, g, h, a, XDWORD3, XDWORD0, XDWORD1, XDWORD2) - - ADDQ $4*32, SRND - CMPQ SRND, $3*4*32 - JB avx2_loop1 - -avx2_loop2: - // w48 - w63 processed with no scheduliung (last 16 rounds) - VPADDD 0*32(TBL)(SRND*1), XDWORD0, XFER - VMOVDQU XFER, (_XFER + 0*32)(SP)(SRND*1) - DO_ROUND_N_0(_XFER + 0*32, a, b, c, d, e, f, g, h, h) - DO_ROUND_N_1(_XFER + 0*32, h, a, b, c, d, e, f, g, h) - DO_ROUND_N_2(_XFER + 0*32, g, h, a, b, c, d, e, f, g) - DO_ROUND_N_3(_XFER + 0*32, f, g, h, a, b, c, d, e, f) - - VPADDD 1*32(TBL)(SRND*1), XDWORD1, XFER - VMOVDQU XFER, (_XFER + 1*32)(SP)(SRND*1) - DO_ROUND_N_0(_XFER + 1*32, e, f, g, h, a, b, c, d, e) - DO_ROUND_N_1(_XFER + 1*32, d, e, f, g, h, a, b, c, d) - DO_ROUND_N_2(_XFER + 1*32, c, d, e, f, g, h, a, b, c) - DO_ROUND_N_3(_XFER + 1*32, b, c, d, e, f, g, h, a, b) - - ADDQ $2*32, SRND - - VMOVDQU XDWORD2, XDWORD0 - VMOVDQU XDWORD3, XDWORD1 - - CMPQ SRND, $4*4*32 - JB avx2_loop2 - - MOVQ dig+0(FP), CTX // d.h[8] - MOVQ _INP(SP), INP - - addm( 0(CTX), a) - addm( 4(CTX), b) - addm( 8(CTX), c) - addm( 
12(CTX), d) - addm( 16(CTX), e) - addm( 20(CTX), f) - addm( 24(CTX), g) - addm( 28(CTX), h) - - CMPQ _INP_END(SP), INP - JB done_hash - - XORQ SRND, SRND - -avx2_loop3: // Do second block using previously scheduled results - DO_ROUND_N_0(_XFER + 0*32 + 16, a, b, c, d, e, f, g, h, a) - DO_ROUND_N_1(_XFER + 0*32 + 16, h, a, b, c, d, e, f, g, h) - DO_ROUND_N_2(_XFER + 0*32 + 16, g, h, a, b, c, d, e, f, g) - DO_ROUND_N_3(_XFER + 0*32 + 16, f, g, h, a, b, c, d, e, f) - - DO_ROUND_N_0(_XFER + 1*32 + 16, e, f, g, h, a, b, c, d, e) - DO_ROUND_N_1(_XFER + 1*32 + 16, d, e, f, g, h, a, b, c, d) - DO_ROUND_N_2(_XFER + 1*32 + 16, c, d, e, f, g, h, a, b, c) - DO_ROUND_N_3(_XFER + 1*32 + 16, b, c, d, e, f, g, h, a, b) - - ADDQ $2*32, SRND - CMPQ SRND, $4*4*32 - JB avx2_loop3 - - MOVQ dig+0(FP), CTX // d.h[8] - MOVQ _INP(SP), INP - ADDQ $64, INP - - addm( 0(CTX), a) - addm( 4(CTX), b) - addm( 8(CTX), c) - addm( 12(CTX), d) - addm( 16(CTX), e) - addm( 20(CTX), f) - addm( 24(CTX), g) - addm( 28(CTX), h) - - CMPQ _INP_END(SP), INP - JA avx2_loop0 - JB done_hash - -avx2_do_last_block: - - VMOVDQU 0(INP), XWORD0 - VMOVDQU 16(INP), XWORD1 - VMOVDQU 32(INP), XWORD2 - VMOVDQU 48(INP), XWORD3 - - MOVQ $flip_mask<>(SB), BP - VMOVDQU (BP), X_BYTE_FLIP_MASK - - VPSHUFB X_BYTE_FLIP_MASK, XWORD0, XWORD0 - VPSHUFB X_BYTE_FLIP_MASK, XWORD1, XWORD1 - VPSHUFB X_BYTE_FLIP_MASK, XWORD2, XWORD2 - VPSHUFB X_BYTE_FLIP_MASK, XWORD3, XWORD3 - - MOVQ $K256<>(SB), TBL - - JMP avx2_last_block_enter - -avx2_only_one_block: - // Load initial digest - MOVL 0(CTX), a // a = H0 - MOVL 4(CTX), b // b = H1 - MOVL 8(CTX), c // c = H2 - MOVL 12(CTX), d // d = H3 - MOVL 16(CTX), e // e = H4 - MOVL 20(CTX), f // f = H5 - MOVL 24(CTX), g // g = H6 - MOVL 28(CTX), h // h = H7 - - JMP avx2_do_last_block - -done_hash: - VZEROUPPER - RET - -// shuffle byte order from LE to BE -DATA flip_mask<>+0x00(SB)/8, $0x0405060700010203 -DATA flip_mask<>+0x08(SB)/8, $0x0c0d0e0f08090a0b -DATA flip_mask<>+0x10(SB)/8, $0x0405060700010203 -DATA flip_mask<>+0x18(SB)/8, $0x0c0d0e0f08090a0b -GLOBL flip_mask<>(SB), 8, $32 - -// shuffle xBxA -> 00BA -DATA shuff_00BA<>+0x00(SB)/8, $0x0b0a090803020100 -DATA shuff_00BA<>+0x08(SB)/8, $0xFFFFFFFFFFFFFFFF -DATA shuff_00BA<>+0x10(SB)/8, $0x0b0a090803020100 -DATA shuff_00BA<>+0x18(SB)/8, $0xFFFFFFFFFFFFFFFF -GLOBL shuff_00BA<>(SB), 8, $32 - -// shuffle xDxC -> DC00 -DATA shuff_DC00<>+0x00(SB)/8, $0xFFFFFFFFFFFFFFFF -DATA shuff_DC00<>+0x08(SB)/8, $0x0b0a090803020100 -DATA shuff_DC00<>+0x10(SB)/8, $0xFFFFFFFFFFFFFFFF -DATA shuff_DC00<>+0x18(SB)/8, $0x0b0a090803020100 -GLOBL shuff_DC00<>(SB), 8, $32 - -// Round specific constants -DATA K256<>+0x00(SB)/4, $0x428a2f98 // k1 -DATA K256<>+0x04(SB)/4, $0x71374491 // k2 -DATA K256<>+0x08(SB)/4, $0xb5c0fbcf // k3 -DATA K256<>+0x0c(SB)/4, $0xe9b5dba5 // k4 -DATA K256<>+0x10(SB)/4, $0x428a2f98 // k1 -DATA K256<>+0x14(SB)/4, $0x71374491 // k2 -DATA K256<>+0x18(SB)/4, $0xb5c0fbcf // k3 -DATA K256<>+0x1c(SB)/4, $0xe9b5dba5 // k4 - -DATA K256<>+0x20(SB)/4, $0x3956c25b // k5 - k8 -DATA K256<>+0x24(SB)/4, $0x59f111f1 -DATA K256<>+0x28(SB)/4, $0x923f82a4 -DATA K256<>+0x2c(SB)/4, $0xab1c5ed5 -DATA K256<>+0x30(SB)/4, $0x3956c25b -DATA K256<>+0x34(SB)/4, $0x59f111f1 -DATA K256<>+0x38(SB)/4, $0x923f82a4 -DATA K256<>+0x3c(SB)/4, $0xab1c5ed5 - -DATA K256<>+0x40(SB)/4, $0xd807aa98 // k9 - k12 -DATA K256<>+0x44(SB)/4, $0x12835b01 -DATA K256<>+0x48(SB)/4, $0x243185be -DATA K256<>+0x4c(SB)/4, $0x550c7dc3 -DATA K256<>+0x50(SB)/4, $0xd807aa98 -DATA K256<>+0x54(SB)/4, $0x12835b01 -DATA K256<>+0x58(SB)/4, 
$0x243185be -DATA K256<>+0x5c(SB)/4, $0x550c7dc3 - -DATA K256<>+0x60(SB)/4, $0x72be5d74 // k13 - k16 -DATA K256<>+0x64(SB)/4, $0x80deb1fe -DATA K256<>+0x68(SB)/4, $0x9bdc06a7 -DATA K256<>+0x6c(SB)/4, $0xc19bf174 -DATA K256<>+0x70(SB)/4, $0x72be5d74 -DATA K256<>+0x74(SB)/4, $0x80deb1fe -DATA K256<>+0x78(SB)/4, $0x9bdc06a7 -DATA K256<>+0x7c(SB)/4, $0xc19bf174 - -DATA K256<>+0x80(SB)/4, $0xe49b69c1 // k17 - k20 -DATA K256<>+0x84(SB)/4, $0xefbe4786 -DATA K256<>+0x88(SB)/4, $0x0fc19dc6 -DATA K256<>+0x8c(SB)/4, $0x240ca1cc -DATA K256<>+0x90(SB)/4, $0xe49b69c1 -DATA K256<>+0x94(SB)/4, $0xefbe4786 -DATA K256<>+0x98(SB)/4, $0x0fc19dc6 -DATA K256<>+0x9c(SB)/4, $0x240ca1cc - -DATA K256<>+0xa0(SB)/4, $0x2de92c6f // k21 - k24 -DATA K256<>+0xa4(SB)/4, $0x4a7484aa -DATA K256<>+0xa8(SB)/4, $0x5cb0a9dc -DATA K256<>+0xac(SB)/4, $0x76f988da -DATA K256<>+0xb0(SB)/4, $0x2de92c6f -DATA K256<>+0xb4(SB)/4, $0x4a7484aa -DATA K256<>+0xb8(SB)/4, $0x5cb0a9dc -DATA K256<>+0xbc(SB)/4, $0x76f988da - -DATA K256<>+0xc0(SB)/4, $0x983e5152 // k25 - k28 -DATA K256<>+0xc4(SB)/4, $0xa831c66d -DATA K256<>+0xc8(SB)/4, $0xb00327c8 -DATA K256<>+0xcc(SB)/4, $0xbf597fc7 -DATA K256<>+0xd0(SB)/4, $0x983e5152 -DATA K256<>+0xd4(SB)/4, $0xa831c66d -DATA K256<>+0xd8(SB)/4, $0xb00327c8 -DATA K256<>+0xdc(SB)/4, $0xbf597fc7 - -DATA K256<>+0xe0(SB)/4, $0xc6e00bf3 // k29 - k32 -DATA K256<>+0xe4(SB)/4, $0xd5a79147 -DATA K256<>+0xe8(SB)/4, $0x06ca6351 -DATA K256<>+0xec(SB)/4, $0x14292967 -DATA K256<>+0xf0(SB)/4, $0xc6e00bf3 -DATA K256<>+0xf4(SB)/4, $0xd5a79147 -DATA K256<>+0xf8(SB)/4, $0x06ca6351 -DATA K256<>+0xfc(SB)/4, $0x14292967 - -DATA K256<>+0x100(SB)/4, $0x27b70a85 -DATA K256<>+0x104(SB)/4, $0x2e1b2138 -DATA K256<>+0x108(SB)/4, $0x4d2c6dfc -DATA K256<>+0x10c(SB)/4, $0x53380d13 -DATA K256<>+0x110(SB)/4, $0x27b70a85 -DATA K256<>+0x114(SB)/4, $0x2e1b2138 -DATA K256<>+0x118(SB)/4, $0x4d2c6dfc -DATA K256<>+0x11c(SB)/4, $0x53380d13 - -DATA K256<>+0x120(SB)/4, $0x650a7354 -DATA K256<>+0x124(SB)/4, $0x766a0abb -DATA K256<>+0x128(SB)/4, $0x81c2c92e -DATA K256<>+0x12c(SB)/4, $0x92722c85 -DATA K256<>+0x130(SB)/4, $0x650a7354 -DATA K256<>+0x134(SB)/4, $0x766a0abb -DATA K256<>+0x138(SB)/4, $0x81c2c92e -DATA K256<>+0x13c(SB)/4, $0x92722c85 - -DATA K256<>+0x140(SB)/4, $0xa2bfe8a1 -DATA K256<>+0x144(SB)/4, $0xa81a664b -DATA K256<>+0x148(SB)/4, $0xc24b8b70 -DATA K256<>+0x14c(SB)/4, $0xc76c51a3 -DATA K256<>+0x150(SB)/4, $0xa2bfe8a1 -DATA K256<>+0x154(SB)/4, $0xa81a664b -DATA K256<>+0x158(SB)/4, $0xc24b8b70 -DATA K256<>+0x15c(SB)/4, $0xc76c51a3 - -DATA K256<>+0x160(SB)/4, $0xd192e819 -DATA K256<>+0x164(SB)/4, $0xd6990624 -DATA K256<>+0x168(SB)/4, $0xf40e3585 -DATA K256<>+0x16c(SB)/4, $0x106aa070 -DATA K256<>+0x170(SB)/4, $0xd192e819 -DATA K256<>+0x174(SB)/4, $0xd6990624 -DATA K256<>+0x178(SB)/4, $0xf40e3585 -DATA K256<>+0x17c(SB)/4, $0x106aa070 - -DATA K256<>+0x180(SB)/4, $0x19a4c116 -DATA K256<>+0x184(SB)/4, $0x1e376c08 -DATA K256<>+0x188(SB)/4, $0x2748774c -DATA K256<>+0x18c(SB)/4, $0x34b0bcb5 -DATA K256<>+0x190(SB)/4, $0x19a4c116 -DATA K256<>+0x194(SB)/4, $0x1e376c08 -DATA K256<>+0x198(SB)/4, $0x2748774c -DATA K256<>+0x19c(SB)/4, $0x34b0bcb5 - -DATA K256<>+0x1a0(SB)/4, $0x391c0cb3 -DATA K256<>+0x1a4(SB)/4, $0x4ed8aa4a -DATA K256<>+0x1a8(SB)/4, $0x5b9cca4f -DATA K256<>+0x1ac(SB)/4, $0x682e6ff3 -DATA K256<>+0x1b0(SB)/4, $0x391c0cb3 -DATA K256<>+0x1b4(SB)/4, $0x4ed8aa4a -DATA K256<>+0x1b8(SB)/4, $0x5b9cca4f -DATA K256<>+0x1bc(SB)/4, $0x682e6ff3 - -DATA K256<>+0x1c0(SB)/4, $0x748f82ee -DATA K256<>+0x1c4(SB)/4, $0x78a5636f -DATA K256<>+0x1c8(SB)/4, 
$0x84c87814 -DATA K256<>+0x1cc(SB)/4, $0x8cc70208 -DATA K256<>+0x1d0(SB)/4, $0x748f82ee -DATA K256<>+0x1d4(SB)/4, $0x78a5636f -DATA K256<>+0x1d8(SB)/4, $0x84c87814 -DATA K256<>+0x1dc(SB)/4, $0x8cc70208 - -DATA K256<>+0x1e0(SB)/4, $0x90befffa -DATA K256<>+0x1e4(SB)/4, $0xa4506ceb -DATA K256<>+0x1e8(SB)/4, $0xbef9a3f7 -DATA K256<>+0x1ec(SB)/4, $0xc67178f2 -DATA K256<>+0x1f0(SB)/4, $0x90befffa -DATA K256<>+0x1f4(SB)/4, $0xa4506ceb -DATA K256<>+0x1f8(SB)/4, $0xbef9a3f7 -DATA K256<>+0x1fc(SB)/4, $0xc67178f2 - -GLOBL K256<>(SB), (NOPTR + RODATA), $512 diff --git a/vendor/github.com/stevvooe/resumable/sha256/sha256block_decl.go b/vendor/github.com/stevvooe/resumable/sha256/sha256block_decl.go deleted file mode 100644 index fe07e53b..00000000 --- a/vendor/github.com/stevvooe/resumable/sha256/sha256block_decl.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build 386 amd64 s390x ppc64le - -package sha256 - -//go:noescape - -func block(dig *digest, p []byte) diff --git a/vendor/github.com/stevvooe/resumable/sha256/sha256block_generic.go b/vendor/github.com/stevvooe/resumable/sha256/sha256block_generic.go deleted file mode 100644 index a182a5ea..00000000 --- a/vendor/github.com/stevvooe/resumable/sha256/sha256block_generic.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !amd64,!386,!s390x,!ppc64le - -package sha256 - -var block = blockGeneric diff --git a/vendor/github.com/stevvooe/resumable/sha256/sha256block_s390x.go b/vendor/github.com/stevvooe/resumable/sha256/sha256block_s390x.go deleted file mode 100644 index b7beefef..00000000 --- a/vendor/github.com/stevvooe/resumable/sha256/sha256block_s390x.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package sha256 - -// featureCheck reports whether the CPU supports the -// SHA256 compute intermediate message digest (KIMD) -// function code. -func featureCheck() bool - -var useAsm = featureCheck() diff --git a/vendor/github.com/stevvooe/resumable/sha256/sha256block_s390x.s b/vendor/github.com/stevvooe/resumable/sha256/sha256block_s390x.s deleted file mode 100644 index ee35991f..00000000 --- a/vendor/github.com/stevvooe/resumable/sha256/sha256block_s390x.s +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
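The AVX2 round macros in the file deleted above (ROUND_AND_SCHED_N_* and DO_ROUND_N_*) are a register-level spelling of the standard SHA-256 round, and the doubled K256 table exists so that a single 256-bit VPADDD can add the same four round constants to both 128-bit lanes of the interleaved two-block schedule. A minimal scalar sketch of the per-round arithmetic those macros compute, assuming kw already holds K[t]+W[t] as the assembly precomputes it on the stack (all names here are illustrative, not from the patch):

```go
package sketch

import "math/bits"

// round is one scalar SHA-256 round as spelled out in the DO_ROUND_N_*
// comments. kw is K[t]+W[t]: the assembly adds the (duplicated) K256
// constants to the byte-swapped message words with VPADDD up front.
func round(a, b, c, d, e, f, g, h, kw uint32) (e2, a2 uint32) {
	s1 := bits.RotateLeft32(e, -6) ^ bits.RotateLeft32(e, -11) ^ bits.RotateLeft32(e, -25)
	ch := ((f ^ g) & e) ^ g // CH: same ((f^g)&e)^g form as the macro comments
	t1 := h + s1 + ch + kw
	s0 := bits.RotateLeft32(a, -2) ^ bits.RotateLeft32(a, -13) ^ bits.RotateLeft32(a, -22)
	maj := ((a | c) & b) | (a & c) // MAJ: ((a|c)&b)|(a&c)
	return d + t1, t1 + s0 + maj // new e and new a
}
```

The remaining six working variables simply shift down one position each round, which the assembly expresses not by moving data but by permuting the register arguments of successive macro invocations.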
- -#include "textflag.h" - -// func featureCheck() bool -TEXT ·featureCheck(SB),NOSPLIT,$16-1 - LA tmp-16(SP), R1 - XOR R0, R0 // query function code is 0 - WORD $0xB93E0006 // KIMD (R6 is ignored) - MOVBZ tmp-16(SP), R4 // get the first byte - AND $0x20, R4 // bit 2 (big endian) for SHA256 - CMPBEQ R4, $0, nosha256 - MOVB $1, ret+0(FP) - RET -nosha256: - MOVB $0, ret+0(FP) - RET - -// func block(dig *digest, p []byte) -TEXT ·block(SB),NOSPLIT,$0-32 - MOVBZ ·useAsm(SB), R4 - LMG dig+0(FP), R1, R3 // R2 = &p[0], R3 = len(p) - CMPBNE R4, $1, generic - MOVBZ $2, R0 // SHA256 function code -loop: - WORD $0xB93E0002 // KIMD R2 - BVS loop // continue if interrupted -done: - XOR R0, R0 // restore R0 - RET -generic: - BR ·blockGeneric(SB) diff --git a/vendor/github.com/stevvooe/resumable/sha512/resume.go b/vendor/github.com/stevvooe/resumable/sha512/resume.go deleted file mode 100644 index ed9173e5..00000000 --- a/vendor/github.com/stevvooe/resumable/sha512/resume.go +++ /dev/null @@ -1,63 +0,0 @@ -package sha512 - -import ( - "bytes" - "crypto" - "encoding/gob" - - "github.com/stevvooe/resumable" - - // import to ensure that our init function runs after the standard package - _ "crypto/sha512" -) - -// Len returns the number of bytes which have been written to the digest. -func (d *digest) Len() int64 { - return int64(d.len) -} - -// State returns a snapshot of the state of the digest. -func (d *digest) State() ([]byte, error) { - var buf bytes.Buffer - encoder := gob.NewEncoder(&buf) - - // We encode this way so that we do not have - // to export these fields of the digest struct. - vals := []interface{}{ - d.h, d.x, d.nx, d.len, d.function, - } - - for _, val := range vals { - if err := encoder.Encode(val); err != nil { - return nil, err - } - } - - return buf.Bytes(), nil -} - -// Restore resets the digest to the given state. -func (d *digest) Restore(state []byte) error { - decoder := gob.NewDecoder(bytes.NewReader(state)) - - // We decode this way so that we do not have - // to export these fields of the digest struct. - vals := []interface{}{ - &d.h, &d.x, &d.nx, &d.len, &d.function, - } - - for _, val := range vals { - if err := decoder.Decode(val); err != nil { - return err - } - } - - switch d.function { - case crypto.SHA384, crypto.SHA512, crypto.SHA512_224, crypto.SHA512_256: - break - default: - return resumable.ErrBadState - } - - return nil -} diff --git a/vendor/github.com/stevvooe/resumable/sha512/sha512.go b/vendor/github.com/stevvooe/resumable/sha512/sha512.go deleted file mode 100644 index 5603c90f..00000000 --- a/vendor/github.com/stevvooe/resumable/sha512/sha512.go +++ /dev/null @@ -1,288 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package sha512 implements the SHA-384, SHA-512, SHA-512/224, and SHA-512/256 -// hash algorithms as defined in FIPS 180-4. -package sha512 - -import ( - "crypto" - "hash" -) - -func init() { - crypto.RegisterHash(crypto.SHA384, New384) - crypto.RegisterHash(crypto.SHA512, New) - crypto.RegisterHash(crypto.SHA512_224, New512_224) - crypto.RegisterHash(crypto.SHA512_256, New512_256) -} - -const ( - // Size is the size, in bytes, of a SHA-512 checksum. - Size = 64 - - // Size224 is the size, in bytes, of a SHA-512/224 checksum. - Size224 = 28 - - // Size256 is the size, in bytes, of a SHA-512/256 checksum. - Size256 = 32 - - // Size384 is the size, in bytes, of a SHA-384 checksum. 
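The resume.go removed above is the heart of the vendored package: State gob-encodes the digest's unexported fields (h, x, nx, len, function) and Restore decodes them back, so a registry upload can park a partially hashed blob and pick the digest up later, possibly in another process. A hedged sketch of that round trip, with resumableHash standing in for the interface the package exposes (the interface name is assumed, not taken from the code):

```go
package sketch

import "hash"

// resumableHash is a hypothetical stand-in for the contract the deleted
// resume.go implements on its digest type.
type resumableHash interface {
	hash.Hash
	State() ([]byte, error)     // gob-encoded snapshot of the digest fields
	Restore(state []byte) error // rehydrate a digest from such a snapshot
}

// resumeAndFinish restores an earlier snapshot, hashes the remainder of
// the stream, and returns the final sum.
func resumeAndFinish(h resumableHash, state, rest []byte) ([]byte, error) {
	if err := h.Restore(state); err != nil {
		return nil, err // e.g. a bad-state error for a foreign snapshot
	}
	h.Write(rest)
	return h.Sum(nil), nil
}
```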
- Size384 = 48 - - // BlockSize is the block size, in bytes, of the SHA-512/224, - // SHA-512/256, SHA-384 and SHA-512 hash functions. - BlockSize = 128 -) - -const ( - chunk = 128 - init0 = 0x6a09e667f3bcc908 - init1 = 0xbb67ae8584caa73b - init2 = 0x3c6ef372fe94f82b - init3 = 0xa54ff53a5f1d36f1 - init4 = 0x510e527fade682d1 - init5 = 0x9b05688c2b3e6c1f - init6 = 0x1f83d9abfb41bd6b - init7 = 0x5be0cd19137e2179 - init0_224 = 0x8c3d37c819544da2 - init1_224 = 0x73e1996689dcd4d6 - init2_224 = 0x1dfab7ae32ff9c82 - init3_224 = 0x679dd514582f9fcf - init4_224 = 0x0f6d2b697bd44da8 - init5_224 = 0x77e36f7304c48942 - init6_224 = 0x3f9d85a86a1d36c8 - init7_224 = 0x1112e6ad91d692a1 - init0_256 = 0x22312194fc2bf72c - init1_256 = 0x9f555fa3c84c64c2 - init2_256 = 0x2393b86b6f53b151 - init3_256 = 0x963877195940eabd - init4_256 = 0x96283ee2a88effe3 - init5_256 = 0xbe5e1e2553863992 - init6_256 = 0x2b0199fc2c85b8aa - init7_256 = 0x0eb72ddc81c52ca2 - init0_384 = 0xcbbb9d5dc1059ed8 - init1_384 = 0x629a292a367cd507 - init2_384 = 0x9159015a3070dd17 - init3_384 = 0x152fecd8f70e5939 - init4_384 = 0x67332667ffc00b31 - init5_384 = 0x8eb44a8768581511 - init6_384 = 0xdb0c2e0d64f98fa7 - init7_384 = 0x47b5481dbefa4fa4 -) - -// digest represents the partial evaluation of a checksum. -type digest struct { - h [8]uint64 - x [chunk]byte - nx int - len uint64 - function crypto.Hash -} - -func (d *digest) Reset() { - switch d.function { - case crypto.SHA384: - d.h[0] = init0_384 - d.h[1] = init1_384 - d.h[2] = init2_384 - d.h[3] = init3_384 - d.h[4] = init4_384 - d.h[5] = init5_384 - d.h[6] = init6_384 - d.h[7] = init7_384 - case crypto.SHA512_224: - d.h[0] = init0_224 - d.h[1] = init1_224 - d.h[2] = init2_224 - d.h[3] = init3_224 - d.h[4] = init4_224 - d.h[5] = init5_224 - d.h[6] = init6_224 - d.h[7] = init7_224 - case crypto.SHA512_256: - d.h[0] = init0_256 - d.h[1] = init1_256 - d.h[2] = init2_256 - d.h[3] = init3_256 - d.h[4] = init4_256 - d.h[5] = init5_256 - d.h[6] = init6_256 - d.h[7] = init7_256 - default: - d.h[0] = init0 - d.h[1] = init1 - d.h[2] = init2 - d.h[3] = init3 - d.h[4] = init4 - d.h[5] = init5 - d.h[6] = init6 - d.h[7] = init7 - } - d.nx = 0 - d.len = 0 -} - -// New returns a new hash.Hash computing the SHA-512 checksum. -func New() hash.Hash { - d := &digest{function: crypto.SHA512} - d.Reset() - return d -} - -// New512_224 returns a new hash.Hash computing the SHA-512/224 checksum. -func New512_224() hash.Hash { - d := &digest{function: crypto.SHA512_224} - d.Reset() - return d -} - -// New512_256 returns a new hash.Hash computing the SHA-512/256 checksum. -func New512_256() hash.Hash { - d := &digest{function: crypto.SHA512_256} - d.Reset() - return d -} - -// New384 returns a new hash.Hash computing the SHA-384 checksum. 
-func New384() hash.Hash { - d := &digest{function: crypto.SHA384} - d.Reset() - return d -} - -func (d *digest) Size() int { - switch d.function { - case crypto.SHA512_224: - return Size224 - case crypto.SHA512_256: - return Size256 - case crypto.SHA384: - return Size384 - default: - return Size - } -} - -func (d *digest) BlockSize() int { return BlockSize } - -func (d *digest) Write(p []byte) (nn int, err error) { - nn = len(p) - d.len += uint64(nn) - if d.nx > 0 { - n := copy(d.x[d.nx:], p) - d.nx += n - if d.nx == chunk { - block(d, d.x[:]) - d.nx = 0 - } - p = p[n:] - } - if len(p) >= chunk { - n := len(p) &^ (chunk - 1) - block(d, p[:n]) - p = p[n:] - } - if len(p) > 0 { - d.nx = copy(d.x[:], p) - } - return -} - -func (d0 *digest) Sum(in []byte) []byte { - // Make a copy of d0 so that caller can keep writing and summing. - d := new(digest) - *d = *d0 - hash := d.checkSum() - switch d.function { - case crypto.SHA384: - return append(in, hash[:Size384]...) - case crypto.SHA512_224: - return append(in, hash[:Size224]...) - case crypto.SHA512_256: - return append(in, hash[:Size256]...) - default: - return append(in, hash[:]...) - } -} - -func (d *digest) checkSum() [Size]byte { - // Padding. Add a 1 bit and 0 bits until 112 bytes mod 128. - len := d.len - var tmp [128]byte - tmp[0] = 0x80 - if len%128 < 112 { - d.Write(tmp[0 : 112-len%128]) - } else { - d.Write(tmp[0 : 128+112-len%128]) - } - - // Length in bits. - len <<= 3 - for i := uint(0); i < 16; i++ { - tmp[i] = byte(len >> (120 - 8*i)) - } - d.Write(tmp[0:16]) - - if d.nx != 0 { - panic("d.nx != 0") - } - - h := d.h[:] - if d.function == crypto.SHA384 { - h = d.h[:6] - } - - var digest [Size]byte - for i, s := range h { - digest[i*8] = byte(s >> 56) - digest[i*8+1] = byte(s >> 48) - digest[i*8+2] = byte(s >> 40) - digest[i*8+3] = byte(s >> 32) - digest[i*8+4] = byte(s >> 24) - digest[i*8+5] = byte(s >> 16) - digest[i*8+6] = byte(s >> 8) - digest[i*8+7] = byte(s) - } - - return digest -} - -// Sum512 returns the SHA512 checksum of the data. -func Sum512(data []byte) [Size]byte { - d := digest{function: crypto.SHA512} - d.Reset() - d.Write(data) - return d.checkSum() -} - -// Sum384 returns the SHA384 checksum of the data. -func Sum384(data []byte) (sum384 [Size384]byte) { - d := digest{function: crypto.SHA384} - d.Reset() - d.Write(data) - sum := d.checkSum() - copy(sum384[:], sum[:Size384]) - return -} - -// Sum512_224 returns the Sum512/224 checksum of the data. -func Sum512_224(data []byte) (sum224 [Size224]byte) { - d := digest{function: crypto.SHA512_224} - d.Reset() - d.Write(data) - sum := d.checkSum() - copy(sum224[:], sum[:Size224]) - return -} - -// Sum512_256 returns the Sum512/256 checksum of the data. -func Sum512_256(data []byte) (sum256 [Size256]byte) { - d := digest{function: crypto.SHA512_256} - d.Reset() - d.Write(data) - sum := d.checkSum() - copy(sum256[:], sum[:Size256]) - return -} diff --git a/vendor/github.com/stevvooe/resumable/sha512/sha512block.go b/vendor/github.com/stevvooe/resumable/sha512/sha512block.go deleted file mode 100644 index 42e8d19f..00000000 --- a/vendor/github.com/stevvooe/resumable/sha512/sha512block.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// SHA512 block step. -// In its own file so that a faster assembly or C version -// can be substituted easily. 
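The one-line padding comment in checkSum above compresses the whole FIPS 180-4 rule: append a 0x80 byte, then zeros until the length is congruent to 112 mod 128, then a 16-byte big-endian bit count, so the total becomes a multiple of the 128-byte block size. A worked sketch of the byte count this implies, derived only from the quoted code:

```go
package main

import "fmt"

// sha512PadLen returns how many bytes checkSum appends for a message of
// msgLen bytes: the 0x80 byte plus zeros up to 112 mod 128, then the
// 16-byte length field.
func sha512PadLen(msgLen uint64) uint64 {
	rem := msgLen % 128
	if rem < 112 {
		return (112 - rem) + 16
	}
	return (128 + 112 - rem) + 16
}

func main() {
	fmt.Println(sha512PadLen(0))   // 128: an empty message still emits one full block
	fmt.Println(sha512PadLen(112)) // 144: no room left for the length, so a second block
}
```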
- -package sha512 - -var _K = []uint64{ - 0x428a2f98d728ae22, - 0x7137449123ef65cd, - 0xb5c0fbcfec4d3b2f, - 0xe9b5dba58189dbbc, - 0x3956c25bf348b538, - 0x59f111f1b605d019, - 0x923f82a4af194f9b, - 0xab1c5ed5da6d8118, - 0xd807aa98a3030242, - 0x12835b0145706fbe, - 0x243185be4ee4b28c, - 0x550c7dc3d5ffb4e2, - 0x72be5d74f27b896f, - 0x80deb1fe3b1696b1, - 0x9bdc06a725c71235, - 0xc19bf174cf692694, - 0xe49b69c19ef14ad2, - 0xefbe4786384f25e3, - 0x0fc19dc68b8cd5b5, - 0x240ca1cc77ac9c65, - 0x2de92c6f592b0275, - 0x4a7484aa6ea6e483, - 0x5cb0a9dcbd41fbd4, - 0x76f988da831153b5, - 0x983e5152ee66dfab, - 0xa831c66d2db43210, - 0xb00327c898fb213f, - 0xbf597fc7beef0ee4, - 0xc6e00bf33da88fc2, - 0xd5a79147930aa725, - 0x06ca6351e003826f, - 0x142929670a0e6e70, - 0x27b70a8546d22ffc, - 0x2e1b21385c26c926, - 0x4d2c6dfc5ac42aed, - 0x53380d139d95b3df, - 0x650a73548baf63de, - 0x766a0abb3c77b2a8, - 0x81c2c92e47edaee6, - 0x92722c851482353b, - 0xa2bfe8a14cf10364, - 0xa81a664bbc423001, - 0xc24b8b70d0f89791, - 0xc76c51a30654be30, - 0xd192e819d6ef5218, - 0xd69906245565a910, - 0xf40e35855771202a, - 0x106aa07032bbd1b8, - 0x19a4c116b8d2d0c8, - 0x1e376c085141ab53, - 0x2748774cdf8eeb99, - 0x34b0bcb5e19b48a8, - 0x391c0cb3c5c95a63, - 0x4ed8aa4ae3418acb, - 0x5b9cca4f7763e373, - 0x682e6ff3d6b2b8a3, - 0x748f82ee5defb2fc, - 0x78a5636f43172f60, - 0x84c87814a1f0ab72, - 0x8cc702081a6439ec, - 0x90befffa23631e28, - 0xa4506cebde82bde9, - 0xbef9a3f7b2c67915, - 0xc67178f2e372532b, - 0xca273eceea26619c, - 0xd186b8c721c0c207, - 0xeada7dd6cde0eb1e, - 0xf57d4f7fee6ed178, - 0x06f067aa72176fba, - 0x0a637dc5a2c898a6, - 0x113f9804bef90dae, - 0x1b710b35131c471b, - 0x28db77f523047d84, - 0x32caab7b40c72493, - 0x3c9ebe0a15c9bebc, - 0x431d67c49c100d4c, - 0x4cc5d4becb3e42b6, - 0x597f299cfc657e2a, - 0x5fcb6fab3ad6faec, - 0x6c44198c4a475817, -} - -func blockGeneric(dig *digest, p []byte) { - var w [80]uint64 - h0, h1, h2, h3, h4, h5, h6, h7 := dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] - for len(p) >= chunk { - for i := 0; i < 16; i++ { - j := i * 8 - w[i] = uint64(p[j])<<56 | uint64(p[j+1])<<48 | uint64(p[j+2])<<40 | uint64(p[j+3])<<32 | - uint64(p[j+4])<<24 | uint64(p[j+5])<<16 | uint64(p[j+6])<<8 | uint64(p[j+7]) - } - for i := 16; i < 80; i++ { - v1 := w[i-2] - t1 := (v1>>19 | v1<<(64-19)) ^ (v1>>61 | v1<<(64-61)) ^ (v1 >> 6) - v2 := w[i-15] - t2 := (v2>>1 | v2<<(64-1)) ^ (v2>>8 | v2<<(64-8)) ^ (v2 >> 7) - - w[i] = t1 + w[i-7] + t2 + w[i-16] - } - - a, b, c, d, e, f, g, h := h0, h1, h2, h3, h4, h5, h6, h7 - - for i := 0; i < 80; i++ { - t1 := h + ((e>>14 | e<<(64-14)) ^ (e>>18 | e<<(64-18)) ^ (e>>41 | e<<(64-41))) + ((e & f) ^ (^e & g)) + _K[i] + w[i] - - t2 := ((a>>28 | a<<(64-28)) ^ (a>>34 | a<<(64-34)) ^ (a>>39 | a<<(64-39))) + ((a & b) ^ (a & c) ^ (b & c)) - - h = g - g = f - f = e - e = d + t1 - d = c - c = b - b = a - a = t1 + t2 - } - - h0 += a - h1 += b - h2 += c - h3 += d - h4 += e - h5 += f - h6 += g - h7 += h - - p = p[chunk:] - } - - dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h0, h1, h2, h3, h4, h5, h6, h7 -} diff --git a/vendor/github.com/stevvooe/resumable/sha512/sha512block_amd64.s b/vendor/github.com/stevvooe/resumable/sha512/sha512block_amd64.s deleted file mode 100644 index 87502cdf..00000000 --- a/vendor/github.com/stevvooe/resumable/sha512/sha512block_amd64.s +++ /dev/null @@ -1,273 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
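blockGeneric above writes every rotation out by hand, for example (v1>>19 | v1<<(64-19)), because this code predates math/bits; on current Go the same right-rotate is a single call that the compiler lowers to one rotate instruction. A quick equivalence sketch:

```go
package sketch

import "math/bits"

// rotrEquivalent shows that the manual rotate expression used throughout
// blockGeneric is the right-rotate math/bits provides.
func rotrEquivalent(v uint64) bool {
	return (v>>19 | v<<(64-19)) == bits.RotateLeft64(v, -19)
}
```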
- -#include "textflag.h" - -// SHA512 block routine. See sha512block.go for Go equivalent. -// -// The algorithm is detailed in FIPS 180-4: -// -// http://csrc.nist.gov/publications/fips/fips180-4/fips-180-4.pdf -// -// Wt = Mt; for 0 <= t <= 15 -// Wt = SIGMA1(Wt-2) + SIGMA0(Wt-15) + Wt-16; for 16 <= t <= 79 -// -// a = H0 -// b = H1 -// c = H2 -// d = H3 -// e = H4 -// f = H5 -// g = H6 -// h = H7 -// -// for t = 0 to 79 { -// T1 = h + BIGSIGMA1(e) + Ch(e,f,g) + Kt + Wt -// T2 = BIGSIGMA0(a) + Maj(a,b,c) -// h = g -// g = f -// f = e -// e = d + T1 -// d = c -// c = b -// b = a -// a = T1 + T2 -// } -// -// H0 = a + H0 -// H1 = b + H1 -// H2 = c + H2 -// H3 = d + H3 -// H4 = e + H4 -// H5 = f + H5 -// H6 = g + H6 -// H7 = h + H7 - -// Wt = Mt; for 0 <= t <= 15 -#define MSGSCHEDULE0(index) \ - MOVQ (index*8)(SI), AX; \ - BSWAPQ AX; \ - MOVQ AX, (index*8)(BP) - -// Wt = SIGMA1(Wt-2) + Wt-7 + SIGMA0(Wt-15) + Wt-16; for 16 <= t <= 79 -// SIGMA0(x) = ROTR(1,x) XOR ROTR(8,x) XOR SHR(7,x) -// SIGMA1(x) = ROTR(19,x) XOR ROTR(61,x) XOR SHR(6,x) -#define MSGSCHEDULE1(index) \ - MOVQ ((index-2)*8)(BP), AX; \ - MOVQ AX, CX; \ - RORQ $19, AX; \ - MOVQ CX, DX; \ - RORQ $61, CX; \ - SHRQ $6, DX; \ - MOVQ ((index-15)*8)(BP), BX; \ - XORQ CX, AX; \ - MOVQ BX, CX; \ - XORQ DX, AX; \ - RORQ $1, BX; \ - MOVQ CX, DX; \ - SHRQ $7, DX; \ - RORQ $8, CX; \ - ADDQ ((index-7)*8)(BP), AX; \ - XORQ CX, BX; \ - XORQ DX, BX; \ - ADDQ ((index-16)*8)(BP), BX; \ - ADDQ BX, AX; \ - MOVQ AX, ((index)*8)(BP) - -// Calculate T1 in AX - uses AX, CX and DX registers. -// h is also used as an accumulator. Wt is passed in AX. -// T1 = h + BIGSIGMA1(e) + Ch(e, f, g) + Kt + Wt -// BIGSIGMA1(x) = ROTR(14,x) XOR ROTR(18,x) XOR ROTR(41,x) -// Ch(x, y, z) = (x AND y) XOR (NOT x AND z) -#define SHA512T1(const, e, f, g, h) \ - MOVQ $const, DX; \ - ADDQ AX, h; \ - MOVQ e, AX; \ - ADDQ DX, h; \ - MOVQ e, CX; \ - RORQ $14, AX; \ - MOVQ e, DX; \ - RORQ $18, CX; \ - XORQ CX, AX; \ - MOVQ e, CX; \ - RORQ $41, DX; \ - ANDQ f, CX; \ - XORQ AX, DX; \ - MOVQ e, AX; \ - NOTQ AX; \ - ADDQ DX, h; \ - ANDQ g, AX; \ - XORQ CX, AX; \ - ADDQ h, AX - -// Calculate T2 in BX - uses BX, CX, DX and DI registers. -// T2 = BIGSIGMA0(a) + Maj(a, b, c) -// BIGSIGMA0(x) = ROTR(28,x) XOR ROTR(34,x) XOR ROTR(39,x) -// Maj(x, y, z) = (x AND y) XOR (x AND z) XOR (y AND z) -#define SHA512T2(a, b, c) \ - MOVQ a, DI; \ - MOVQ c, BX; \ - RORQ $28, DI; \ - MOVQ a, DX; \ - ANDQ b, BX; \ - RORQ $34, DX; \ - MOVQ a, CX; \ - ANDQ c, CX; \ - XORQ DX, DI; \ - XORQ CX, BX; \ - MOVQ a, DX; \ - MOVQ b, CX; \ - RORQ $39, DX; \ - ANDQ a, CX; \ - XORQ CX, BX; \ - XORQ DX, DI; \ - ADDQ DI, BX - -// Calculate T1 and T2, then e = d + T1 and a = T1 + T2. -// The values for e and a are stored in d and h, ready for rotation. 
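One nit in the deleted file's header comment above: its schedule line omits the Wt-7 term, which the MSGSCHEDULE1 comment and the code itself both include. For reference, a scalar Go restatement of the schedule and round that the macros implement, with kw standing for Kt+Wt (names illustrative):

```go
package sketch

import "math/bits"

// schedule is MSGSCHEDULE1 for 16 <= t <= 79:
// Wt = SIGMA1(W[t-2]) + W[t-7] + SIGMA0(W[t-15]) + W[t-16], where
// SIGMA0(x) = ROTR(1,x) ^ ROTR(8,x) ^ SHR(7,x) and
// SIGMA1(x) = ROTR(19,x) ^ ROTR(61,x) ^ SHR(6,x).
func schedule(w *[80]uint64, t int) {
	v1 := w[t-2]
	s1 := bits.RotateLeft64(v1, -19) ^ bits.RotateLeft64(v1, -61) ^ (v1 >> 6)
	v2 := w[t-15]
	s0 := bits.RotateLeft64(v2, -1) ^ bits.RotateLeft64(v2, -8) ^ (v2 >> 7)
	w[t] = s1 + w[t-7] + s0 + w[t-16]
}

// round returns the new e and a; the other six variables rotate down one
// position, exactly as the SHA512ROUND macro's argument order encodes.
func round(a, b, c, d, e, f, g, h, kw uint64) (e2, a2 uint64) {
	t1 := h + kw +
		(bits.RotateLeft64(e, -14) ^ bits.RotateLeft64(e, -18) ^ bits.RotateLeft64(e, -41)) +
		((e & f) ^ (^e & g)) // Ch(e,f,g)
	t2 := (bits.RotateLeft64(a, -28) ^ bits.RotateLeft64(a, -34) ^ bits.RotateLeft64(a, -39)) +
		((a & b) ^ (a & c) ^ (b & c)) // Maj(a,b,c)
	return d + t1, t1 + t2
}
```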
-#define SHA512ROUND(index, const, a, b, c, d, e, f, g, h) \ - SHA512T1(const, e, f, g, h); \ - SHA512T2(a, b, c); \ - MOVQ BX, h; \ - ADDQ AX, d; \ - ADDQ AX, h - -#define SHA512ROUND0(index, const, a, b, c, d, e, f, g, h) \ - MSGSCHEDULE0(index); \ - SHA512ROUND(index, const, a, b, c, d, e, f, g, h) - -#define SHA512ROUND1(index, const, a, b, c, d, e, f, g, h) \ - MSGSCHEDULE1(index); \ - SHA512ROUND(index, const, a, b, c, d, e, f, g, h) - -TEXT ·block(SB),0,$648-32 - MOVQ p_base+8(FP), SI - MOVQ p_len+16(FP), DX - SHRQ $7, DX - SHLQ $7, DX - - LEAQ (SI)(DX*1), DI - MOVQ DI, 640(SP) - CMPQ SI, DI - JEQ end - - MOVQ dig+0(FP), BP - MOVQ (0*8)(BP), R8 // a = H0 - MOVQ (1*8)(BP), R9 // b = H1 - MOVQ (2*8)(BP), R10 // c = H2 - MOVQ (3*8)(BP), R11 // d = H3 - MOVQ (4*8)(BP), R12 // e = H4 - MOVQ (5*8)(BP), R13 // f = H5 - MOVQ (6*8)(BP), R14 // g = H6 - MOVQ (7*8)(BP), R15 // h = H7 - -loop: - MOVQ SP, BP // message schedule - - SHA512ROUND0(0, 0x428a2f98d728ae22, R8, R9, R10, R11, R12, R13, R14, R15) - SHA512ROUND0(1, 0x7137449123ef65cd, R15, R8, R9, R10, R11, R12, R13, R14) - SHA512ROUND0(2, 0xb5c0fbcfec4d3b2f, R14, R15, R8, R9, R10, R11, R12, R13) - SHA512ROUND0(3, 0xe9b5dba58189dbbc, R13, R14, R15, R8, R9, R10, R11, R12) - SHA512ROUND0(4, 0x3956c25bf348b538, R12, R13, R14, R15, R8, R9, R10, R11) - SHA512ROUND0(5, 0x59f111f1b605d019, R11, R12, R13, R14, R15, R8, R9, R10) - SHA512ROUND0(6, 0x923f82a4af194f9b, R10, R11, R12, R13, R14, R15, R8, R9) - SHA512ROUND0(7, 0xab1c5ed5da6d8118, R9, R10, R11, R12, R13, R14, R15, R8) - SHA512ROUND0(8, 0xd807aa98a3030242, R8, R9, R10, R11, R12, R13, R14, R15) - SHA512ROUND0(9, 0x12835b0145706fbe, R15, R8, R9, R10, R11, R12, R13, R14) - SHA512ROUND0(10, 0x243185be4ee4b28c, R14, R15, R8, R9, R10, R11, R12, R13) - SHA512ROUND0(11, 0x550c7dc3d5ffb4e2, R13, R14, R15, R8, R9, R10, R11, R12) - SHA512ROUND0(12, 0x72be5d74f27b896f, R12, R13, R14, R15, R8, R9, R10, R11) - SHA512ROUND0(13, 0x80deb1fe3b1696b1, R11, R12, R13, R14, R15, R8, R9, R10) - SHA512ROUND0(14, 0x9bdc06a725c71235, R10, R11, R12, R13, R14, R15, R8, R9) - SHA512ROUND0(15, 0xc19bf174cf692694, R9, R10, R11, R12, R13, R14, R15, R8) - - SHA512ROUND1(16, 0xe49b69c19ef14ad2, R8, R9, R10, R11, R12, R13, R14, R15) - SHA512ROUND1(17, 0xefbe4786384f25e3, R15, R8, R9, R10, R11, R12, R13, R14) - SHA512ROUND1(18, 0x0fc19dc68b8cd5b5, R14, R15, R8, R9, R10, R11, R12, R13) - SHA512ROUND1(19, 0x240ca1cc77ac9c65, R13, R14, R15, R8, R9, R10, R11, R12) - SHA512ROUND1(20, 0x2de92c6f592b0275, R12, R13, R14, R15, R8, R9, R10, R11) - SHA512ROUND1(21, 0x4a7484aa6ea6e483, R11, R12, R13, R14, R15, R8, R9, R10) - SHA512ROUND1(22, 0x5cb0a9dcbd41fbd4, R10, R11, R12, R13, R14, R15, R8, R9) - SHA512ROUND1(23, 0x76f988da831153b5, R9, R10, R11, R12, R13, R14, R15, R8) - SHA512ROUND1(24, 0x983e5152ee66dfab, R8, R9, R10, R11, R12, R13, R14, R15) - SHA512ROUND1(25, 0xa831c66d2db43210, R15, R8, R9, R10, R11, R12, R13, R14) - SHA512ROUND1(26, 0xb00327c898fb213f, R14, R15, R8, R9, R10, R11, R12, R13) - SHA512ROUND1(27, 0xbf597fc7beef0ee4, R13, R14, R15, R8, R9, R10, R11, R12) - SHA512ROUND1(28, 0xc6e00bf33da88fc2, R12, R13, R14, R15, R8, R9, R10, R11) - SHA512ROUND1(29, 0xd5a79147930aa725, R11, R12, R13, R14, R15, R8, R9, R10) - SHA512ROUND1(30, 0x06ca6351e003826f, R10, R11, R12, R13, R14, R15, R8, R9) - SHA512ROUND1(31, 0x142929670a0e6e70, R9, R10, R11, R12, R13, R14, R15, R8) - SHA512ROUND1(32, 0x27b70a8546d22ffc, R8, R9, R10, R11, R12, R13, R14, R15) - SHA512ROUND1(33, 0x2e1b21385c26c926, R15, R8, R9, R10, R11, R12, R13, R14) - 
SHA512ROUND1(34, 0x4d2c6dfc5ac42aed, R14, R15, R8, R9, R10, R11, R12, R13) - SHA512ROUND1(35, 0x53380d139d95b3df, R13, R14, R15, R8, R9, R10, R11, R12) - SHA512ROUND1(36, 0x650a73548baf63de, R12, R13, R14, R15, R8, R9, R10, R11) - SHA512ROUND1(37, 0x766a0abb3c77b2a8, R11, R12, R13, R14, R15, R8, R9, R10) - SHA512ROUND1(38, 0x81c2c92e47edaee6, R10, R11, R12, R13, R14, R15, R8, R9) - SHA512ROUND1(39, 0x92722c851482353b, R9, R10, R11, R12, R13, R14, R15, R8) - SHA512ROUND1(40, 0xa2bfe8a14cf10364, R8, R9, R10, R11, R12, R13, R14, R15) - SHA512ROUND1(41, 0xa81a664bbc423001, R15, R8, R9, R10, R11, R12, R13, R14) - SHA512ROUND1(42, 0xc24b8b70d0f89791, R14, R15, R8, R9, R10, R11, R12, R13) - SHA512ROUND1(43, 0xc76c51a30654be30, R13, R14, R15, R8, R9, R10, R11, R12) - SHA512ROUND1(44, 0xd192e819d6ef5218, R12, R13, R14, R15, R8, R9, R10, R11) - SHA512ROUND1(45, 0xd69906245565a910, R11, R12, R13, R14, R15, R8, R9, R10) - SHA512ROUND1(46, 0xf40e35855771202a, R10, R11, R12, R13, R14, R15, R8, R9) - SHA512ROUND1(47, 0x106aa07032bbd1b8, R9, R10, R11, R12, R13, R14, R15, R8) - SHA512ROUND1(48, 0x19a4c116b8d2d0c8, R8, R9, R10, R11, R12, R13, R14, R15) - SHA512ROUND1(49, 0x1e376c085141ab53, R15, R8, R9, R10, R11, R12, R13, R14) - SHA512ROUND1(50, 0x2748774cdf8eeb99, R14, R15, R8, R9, R10, R11, R12, R13) - SHA512ROUND1(51, 0x34b0bcb5e19b48a8, R13, R14, R15, R8, R9, R10, R11, R12) - SHA512ROUND1(52, 0x391c0cb3c5c95a63, R12, R13, R14, R15, R8, R9, R10, R11) - SHA512ROUND1(53, 0x4ed8aa4ae3418acb, R11, R12, R13, R14, R15, R8, R9, R10) - SHA512ROUND1(54, 0x5b9cca4f7763e373, R10, R11, R12, R13, R14, R15, R8, R9) - SHA512ROUND1(55, 0x682e6ff3d6b2b8a3, R9, R10, R11, R12, R13, R14, R15, R8) - SHA512ROUND1(56, 0x748f82ee5defb2fc, R8, R9, R10, R11, R12, R13, R14, R15) - SHA512ROUND1(57, 0x78a5636f43172f60, R15, R8, R9, R10, R11, R12, R13, R14) - SHA512ROUND1(58, 0x84c87814a1f0ab72, R14, R15, R8, R9, R10, R11, R12, R13) - SHA512ROUND1(59, 0x8cc702081a6439ec, R13, R14, R15, R8, R9, R10, R11, R12) - SHA512ROUND1(60, 0x90befffa23631e28, R12, R13, R14, R15, R8, R9, R10, R11) - SHA512ROUND1(61, 0xa4506cebde82bde9, R11, R12, R13, R14, R15, R8, R9, R10) - SHA512ROUND1(62, 0xbef9a3f7b2c67915, R10, R11, R12, R13, R14, R15, R8, R9) - SHA512ROUND1(63, 0xc67178f2e372532b, R9, R10, R11, R12, R13, R14, R15, R8) - SHA512ROUND1(64, 0xca273eceea26619c, R8, R9, R10, R11, R12, R13, R14, R15) - SHA512ROUND1(65, 0xd186b8c721c0c207, R15, R8, R9, R10, R11, R12, R13, R14) - SHA512ROUND1(66, 0xeada7dd6cde0eb1e, R14, R15, R8, R9, R10, R11, R12, R13) - SHA512ROUND1(67, 0xf57d4f7fee6ed178, R13, R14, R15, R8, R9, R10, R11, R12) - SHA512ROUND1(68, 0x06f067aa72176fba, R12, R13, R14, R15, R8, R9, R10, R11) - SHA512ROUND1(69, 0x0a637dc5a2c898a6, R11, R12, R13, R14, R15, R8, R9, R10) - SHA512ROUND1(70, 0x113f9804bef90dae, R10, R11, R12, R13, R14, R15, R8, R9) - SHA512ROUND1(71, 0x1b710b35131c471b, R9, R10, R11, R12, R13, R14, R15, R8) - SHA512ROUND1(72, 0x28db77f523047d84, R8, R9, R10, R11, R12, R13, R14, R15) - SHA512ROUND1(73, 0x32caab7b40c72493, R15, R8, R9, R10, R11, R12, R13, R14) - SHA512ROUND1(74, 0x3c9ebe0a15c9bebc, R14, R15, R8, R9, R10, R11, R12, R13) - SHA512ROUND1(75, 0x431d67c49c100d4c, R13, R14, R15, R8, R9, R10, R11, R12) - SHA512ROUND1(76, 0x4cc5d4becb3e42b6, R12, R13, R14, R15, R8, R9, R10, R11) - SHA512ROUND1(77, 0x597f299cfc657e2a, R11, R12, R13, R14, R15, R8, R9, R10) - SHA512ROUND1(78, 0x5fcb6fab3ad6faec, R10, R11, R12, R13, R14, R15, R8, R9) - SHA512ROUND1(79, 0x6c44198c4a475817, R9, R10, R11, R12, R13, R14, R15, R8) - - MOVQ 
dig+0(FP), BP - ADDQ (0*8)(BP), R8 // H0 = a + H0 - MOVQ R8, (0*8)(BP) - ADDQ (1*8)(BP), R9 // H1 = b + H1 - MOVQ R9, (1*8)(BP) - ADDQ (2*8)(BP), R10 // H2 = c + H2 - MOVQ R10, (2*8)(BP) - ADDQ (3*8)(BP), R11 // H3 = d + H3 - MOVQ R11, (3*8)(BP) - ADDQ (4*8)(BP), R12 // H4 = e + H4 - MOVQ R12, (4*8)(BP) - ADDQ (5*8)(BP), R13 // H5 = f + H5 - MOVQ R13, (5*8)(BP) - ADDQ (6*8)(BP), R14 // H6 = g + H6 - MOVQ R14, (6*8)(BP) - ADDQ (7*8)(BP), R15 // H7 = h + H7 - MOVQ R15, (7*8)(BP) - - ADDQ $128, SI - CMPQ SI, 640(SP) - JB loop - -end: - RET diff --git a/vendor/github.com/stevvooe/resumable/sha512/sha512block_decl.go b/vendor/github.com/stevvooe/resumable/sha512/sha512block_decl.go deleted file mode 100644 index 8194506b..00000000 --- a/vendor/github.com/stevvooe/resumable/sha512/sha512block_decl.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build amd64 s390x ppc64le - -package sha512 - -//go:noescape - -func block(dig *digest, p []byte) diff --git a/vendor/github.com/stevvooe/resumable/sha512/sha512block_generic.go b/vendor/github.com/stevvooe/resumable/sha512/sha512block_generic.go deleted file mode 100644 index 08f2e071..00000000 --- a/vendor/github.com/stevvooe/resumable/sha512/sha512block_generic.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !amd64,!s390x,!ppc64le - -package sha512 - -var block = blockGeneric diff --git a/vendor/github.com/stevvooe/resumable/sha512/sha512block_s390x.go b/vendor/github.com/stevvooe/resumable/sha512/sha512block_s390x.go deleted file mode 100644 index f05dc18e..00000000 --- a/vendor/github.com/stevvooe/resumable/sha512/sha512block_s390x.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package sha512 - -// featureCheck reports whether the CPU supports the -// SHA512 compute intermediate message digest (KIMD) -// function code. -func featureCheck() bool - -var useAsm = featureCheck() diff --git a/vendor/github.com/stevvooe/resumable/sha512/sha512block_s390x.s b/vendor/github.com/stevvooe/resumable/sha512/sha512block_s390x.s deleted file mode 100644 index aab81e2b..00000000 --- a/vendor/github.com/stevvooe/resumable/sha512/sha512block_s390x.s +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
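Both s390x stubs in this series (the sha256 one earlier and the sha512 one that follows) gate on the same probe: KIMD with function code 0 fills a 16-byte status block in which bit n, counted from the most significant bit of byte 0, reports whether function code n is installed. That is why the sha256 variant tests AND $0x20 (bit 2, the SHA-256 function code) and the sha512 variant tests AND $0x10 (bit 3). A sketch of the bit test, assuming only what the assembly comments state:

```go
package sketch

// kimdHasFunc interprets the 16-byte status block a KIMD query leaves
// behind: bit `code`, numbered from the most significant bit of byte 0,
// is set when that function code is available.
func kimdHasFunc(status [16]byte, code uint) bool {
	return status[code/8]&(0x80>>(code%8)) != 0
}
```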
- -#include "textflag.h" - -// func featureCheck() bool -TEXT ·featureCheck(SB),NOSPLIT,$16-1 - LA tmp-16(SP), R1 - XOR R0, R0 // query function code is 0 - WORD $0xB93E0006 // KIMD (R6 is ignored) - MOVBZ tmp-16(SP), R4 // get the first byte - AND $0x10, R4 // bit 3 (big endian) for SHA512 - CMPBEQ R4, $0, nosha512 - MOVB $1, ret+0(FP) - RET -nosha512: - MOVB $0, ret+0(FP) - RET - -// func block(dig *digest, p []byte) -TEXT ·block(SB),NOSPLIT,$0-32 - MOVBZ ·useAsm(SB), R4 - LMG dig+0(FP), R1, R3 // R2 = &p[0], R3 = len(p) - CMPBNE R4, $1, generic - MOVBZ $3, R0 // SHA512 function code -loop: - WORD $0xB93E0002 // KIMD R2 - BVS loop // continue if interrupted -done: - XOR R0, R0 // restore R0 - RET -generic: - BR ·blockGeneric(SB) From 9da0f07c928fd358601c3677fb2f9405bd41c284 Mon Sep 17 00:00:00 2001 From: David Wu Date: Tue, 11 Sep 2018 00:19:14 -0700 Subject: [PATCH 161/276] update travis with go 1.11 Signed-off-by: David Wu --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index 3b5713a4..c776bc3e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -9,6 +9,7 @@ language: go go: - "1.9.x" - "1.10.x" + - "1.11.x" go_import_path: github.com/docker/distribution From 166874ade92af036f20948cd0493b7fc9e54eb24 Mon Sep 17 00:00:00 2001 From: David Wu Date: Tue, 11 Sep 2018 11:40:18 -0700 Subject: [PATCH 162/276] fix gofmt and goimports Signed-off-by: David Wu --- registry/storage/driver/s3-aws/s3.go | 10 +++++----- registry/storage/linkedblobstore.go | 16 ++++++++-------- registry/storage/linkedblobstore_test.go | 4 ++-- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/registry/storage/driver/s3-aws/s3.go b/registry/storage/driver/s3-aws/s3.go index 9cd87dba..800435d0 100644 --- a/registry/storage/driver/s3-aws/s3.go +++ b/registry/storage/driver/s3-aws/s3.go @@ -476,11 +476,11 @@ func New(params DriverParameters) (*Driver, error) { // } d := &driver{ - S3: s3obj, - Bucket: params.Bucket, - ChunkSize: params.ChunkSize, - Encrypt: params.Encrypt, - KeyID: params.KeyID, + S3: s3obj, + Bucket: params.Bucket, + ChunkSize: params.ChunkSize, + Encrypt: params.Encrypt, + KeyID: params.KeyID, MultipartCopyChunkSize: params.MultipartCopyChunkSize, MultipartCopyMaxConcurrency: params.MultipartCopyMaxConcurrency, MultipartCopyThresholdSize: params.MultipartCopyThresholdSize, diff --git a/registry/storage/linkedblobstore.go b/registry/storage/linkedblobstore.go index 3fb1da26..de591c8a 100644 --- a/registry/storage/linkedblobstore.go +++ b/registry/storage/linkedblobstore.go @@ -312,14 +312,14 @@ func (lbs *linkedBlobStore) newBlobUpload(ctx context.Context, uuid, path string } bw := &blobWriter{ - ctx: ctx, - blobStore: lbs, - id: uuid, - startedAt: startedAt, - digester: digest.Canonical.Digester(), - fileWriter: fw, - driver: lbs.driver, - path: path, + ctx: ctx, + blobStore: lbs, + id: uuid, + startedAt: startedAt, + digester: digest.Canonical.Digester(), + fileWriter: fw, + driver: lbs.driver, + path: path, resumableDigestEnabled: lbs.resumableDigestEnabled, } diff --git a/registry/storage/linkedblobstore_test.go b/registry/storage/linkedblobstore_test.go index 85376f71..e0ffd279 100644 --- a/registry/storage/linkedblobstore_test.go +++ b/registry/storage/linkedblobstore_test.go @@ -162,8 +162,8 @@ type mockBlobDescriptorServiceFactory struct { func (f *mockBlobDescriptorServiceFactory) BlobAccessController(svc distribution.BlobDescriptorService) distribution.BlobDescriptorService { return &mockBlobDescriptorService{ BlobDescriptorService: svc, - t: 
f.t, - stats: f.stats, + t: f.t, + stats: f.stats, } } From bd41413d57e672049bf71b1e146c88a48b432be0 Mon Sep 17 00:00:00 2001 From: David Wu Date: Tue, 11 Sep 2018 14:19:08 -0700 Subject: [PATCH 163/276] remove closenotifier Signed-off-by: David Wu --- context/http.go | 30 ------------------------------ registry/handlers/api_test.go | 29 ----------------------------- registry/handlers/helpers.go | 8 +------- 3 files changed, 1 insertion(+), 66 deletions(-) diff --git a/context/http.go b/context/http.go index b7447800..bc22f0bb 100644 --- a/context/http.go +++ b/context/http.go @@ -104,18 +104,6 @@ func GetRequestID(ctx context.Context) string { // WithResponseWriter returns a new context and response writer that makes // interesting response statistics available within the context. func WithResponseWriter(ctx context.Context, w http.ResponseWriter) (context.Context, http.ResponseWriter) { - if closeNotifier, ok := w.(http.CloseNotifier); ok { - irwCN := &instrumentedResponseWriterCN{ - instrumentedResponseWriter: instrumentedResponseWriter{ - ResponseWriter: w, - Context: ctx, - }, - CloseNotifier: closeNotifier, - } - - return irwCN, irwCN - } - irw := instrumentedResponseWriter{ ResponseWriter: w, Context: ctx, @@ -270,14 +258,6 @@ func (ctx *muxVarsContext) Value(key interface{}) interface{} { return ctx.Context.Value(key) } -// instrumentedResponseWriterCN provides response writer information in a -// context. It implements http.CloseNotifier so that users can detect -// early disconnects. -type instrumentedResponseWriterCN struct { - instrumentedResponseWriter - http.CloseNotifier -} - // instrumentedResponseWriter provides response writer information in a // context. This variant is only used in the case where CloseNotifier is not // implemented by the parent ResponseWriter. @@ -355,13 +335,3 @@ func (irw *instrumentedResponseWriter) Value(key interface{}) interface{} { fallback: return irw.Context.Value(key) } - -func (irw *instrumentedResponseWriterCN) Value(key interface{}) interface{} { - if keyStr, ok := key.(string); ok { - if keyStr == "http.response" { - return irw - } - } - - return irw.instrumentedResponseWriter.Value(key) -} diff --git a/registry/handlers/api_test.go b/registry/handlers/api_test.go index bed0ab2c..2544efba 100644 --- a/registry/handlers/api_test.go +++ b/registry/handlers/api_test.go @@ -2529,35 +2529,6 @@ func TestRegistryAsCacheMutationAPIs(t *testing.T) { } -// TestCheckContextNotifier makes sure the API endpoints get a ResponseWriter -// that implements http.ContextNotifier. 
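This patch can delete the CloseNotifier plumbing, including the test below, because the net/http server cancels an incoming request's context when the client connection closes; copyFullPayload switches to exactly that signal. A minimal sketch of the replacement idiom:

```go
package sketch

import (
	"io"
	"net/http"
)

// handler watches for client disconnects without http.CloseNotifier:
// the request context's Done channel closes when the peer goes away.
func handler(w http.ResponseWriter, r *http.Request) {
	select {
	case <-r.Context().Done():
		return // client gone; skip the expensive work
	default:
	}
	io.WriteString(w, "ok")
}
```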
-func TestCheckContextNotifier(t *testing.T) { - env := newTestEnv(t, false) - defer env.Shutdown() - - // Register a new endpoint for testing - env.app.router.Handle("/unittest/{name}/", env.app.dispatcher(func(ctx *Context, r *http.Request) http.Handler { - return handlers.MethodHandler{ - "GET": http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if _, ok := w.(http.CloseNotifier); !ok { - t.Fatal("could not cast ResponseWriter to CloseNotifier") - } - w.WriteHeader(200) - }), - } - })) - - resp, err := http.Get(env.server.URL + "/unittest/reponame/") - if err != nil { - t.Fatalf("unexpected error issuing request: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - t.Fatalf("wrong status code - expected 200, got %d", resp.StatusCode) - } -} - func TestProxyManifestGetByTag(t *testing.T) { truthConfig := configuration.Configuration{ Storage: configuration.Storage{ diff --git a/registry/handlers/helpers.go b/registry/handlers/helpers.go index 46ad797e..b02338e9 100644 --- a/registry/handlers/helpers.go +++ b/registry/handlers/helpers.go @@ -27,13 +27,7 @@ func closeResources(handler http.Handler, closers ...io.Closer) http.Handler { // The copy will be limited to `limit` bytes, if limit is greater than zero. func copyFullPayload(ctx context.Context, responseWriter http.ResponseWriter, r *http.Request, destWriter io.Writer, limit int64, action string) error { // Get a channel that tells us if the client disconnects - var clientClosed <-chan bool - if notifier, ok := responseWriter.(http.CloseNotifier); ok { - clientClosed = notifier.CloseNotify() - } else { - dcontext.GetLogger(ctx).Warnf("the ResponseWriter does not implement CloseNotifier (type: %T)", responseWriter) - } - + clientClosed := r.Context().Done() var body = r.Body if limit > 0 { body = http.MaxBytesReader(responseWriter, body, limit) From d8bde9b97ef9b8853969c74cd322c1e95d32ab5b Mon Sep 17 00:00:00 2001 From: David Wu Date: Tue, 11 Sep 2018 16:26:21 -0700 Subject: [PATCH 164/276] remove go 1.9 and 1.10 checks from travis Signed-off-by: David Wu --- .travis.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index c776bc3e..44ced604 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,8 +7,6 @@ services: language: go go: - - "1.9.x" - - "1.10.x" - "1.11.x" go_import_path: github.com/docker/distribution From a927fbdb9bd6c4457ca67e758054ce477cc4ed81 Mon Sep 17 00:00:00 2001 From: David Wu Date: Wed, 12 Sep 2018 14:11:49 -0700 Subject: [PATCH 165/276] track digest offset in blobwriter Signed-off-by: David Wu --- registry/storage/blobwriter.go | 15 ++++++++++++--- registry/storage/blobwriter_resumable.go | 24 +++++++----------------- 2 files changed, 19 insertions(+), 20 deletions(-) diff --git a/registry/storage/blobwriter.go b/registry/storage/blobwriter.go index 2bb25dcc..54c79b6f 100644 --- a/registry/storage/blobwriter.go +++ b/registry/storage/blobwriter.go @@ -33,7 +33,7 @@ type blobWriter struct { id string startedAt time.Time digester digest.Digester - written int64 // track the contiguous write + written int64 // track the write to digester fileWriter storagedriver.FileWriter driver storagedriver.StorageDriver @@ -119,7 +119,12 @@ func (bw *blobWriter) Write(p []byte) (int, error) { return 0, err } - n, err := io.MultiWriter(bw.fileWriter, bw.digester.Hash()).Write(p) + _, err := bw.fileWriter.Write(p) + if err != nil { + return 0, err + } + + n, err := bw.digester.Hash().Write(p) bw.written += int64(n) return n, err @@ -133,7 +138,11 @@ func (bw *blobWriter) 
ReadFrom(r io.Reader) (n int64, err error) { return 0, err } - nn, err := io.Copy(io.MultiWriter(bw.fileWriter, bw.digester.Hash()), r) + // Using a TeeReader instead of MultiWriter ensures Copy returns + // the amount written to the digester as well as ensuring that we + // write to the fileWriter first + tee := io.TeeReader(r, bw.fileWriter) + nn, err := io.Copy(bw.digester.Hash(), tee) bw.written += nn return nn, err diff --git a/registry/storage/blobwriter_resumable.go b/registry/storage/blobwriter_resumable.go index 1f3b93d4..b970e865 100644 --- a/registry/storage/blobwriter_resumable.go +++ b/registry/storage/blobwriter_resumable.go @@ -21,18 +21,13 @@ func (bw *blobWriter) resumeDigest(ctx context.Context) error { return errResumableDigestNotAvailable } - h, ok := bw.digester.Hash().(encoding.BinaryMarshaler) + h, ok := bw.digester.Hash().(encoding.BinaryUnmarshaler) if !ok { return errResumableDigestNotAvailable } - state, err := h.MarshalBinary() - if err != nil { - return err - } - offset := bw.fileWriter.Size() - if offset == int64(len(state)) { + if offset == bw.written { // State of digester is already at the requested offset. return nil } @@ -62,19 +57,14 @@ func (bw *blobWriter) resumeDigest(ctx context.Context) error { return err } - // This type assertion is safe since we already did an assertion at the beginning - if err = h.(encoding.BinaryUnmarshaler).UnmarshalBinary(storedState); err != nil { - return err - } - - state, err = h.(encoding.BinaryMarshaler).MarshalBinary() - if err != nil { + if err = h.UnmarshalBinary(storedState); err != nil { return err } + bw.written = hashStateMatch.offset } // Mind the gap. - if gapLen := offset - int64(len(state)); gapLen > 0 { + if gapLen := offset - bw.written; gapLen > 0 { return errResumableDigestNotAvailable } @@ -136,14 +126,14 @@ func (bw *blobWriter) storeHashState(ctx context.Context) error { state, err := h.MarshalBinary() if err != nil { - return fmt.Errorf("could not marshal: %v", err) + return err } uploadHashStatePath, err := pathFor(uploadHashStatePathSpec{ name: bw.blobStore.repository.Named().String(), id: bw.id, alg: bw.digester.Digest().Algorithm(), - offset: int64(len(state)), + offset: bw.written, }) if err != nil { From 6133840f4920e37f44c170e238bbabde23d73cf7 Mon Sep 17 00:00:00 2001 From: liyongxin Date: Thu, 13 Sep 2018 14:08:39 +0800 Subject: [PATCH 166/276] typo fix from existant to existent Signed-off-by: liyongxin Signed-off-by: Yongxin Li --- registry/proxy/proxymanifeststore_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/registry/proxy/proxymanifeststore_test.go b/registry/proxy/proxymanifeststore_test.go index b80b303e..99aaed28 100644 --- a/registry/proxy/proxymanifeststore_test.go +++ b/registry/proxy/proxymanifeststore_test.go @@ -217,7 +217,7 @@ func TestProxyManifests(t *testing.T) { t.Fatalf("Error checking existence") } if !exists { - t.Errorf("Unexpected non-existant manifest") + t.Errorf("Unexpected non-existent manifest") } if (*localStats)["exists"] != 1 && (*remoteStats)["exists"] != 1 { @@ -252,7 +252,7 @@ func TestProxyManifests(t *testing.T) { t.Fatal(err) } if !exists { - t.Errorf("Unexpected non-existant manifest") + t.Errorf("Unexpected non-existent manifest") } if (*localStats)["exists"] != 2 && (*remoteStats)["exists"] != 1 { From f730f3ab77d0098f5172a825d9feecc6265f3c48 Mon Sep 17 00:00:00 2001 From: Viktor Stanchev Date: Mon, 13 Mar 2017 16:35:15 -0700 Subject: [PATCH 167/276] add autoredirect auth config It redirects the user to to the Host 
header's domain whenever they try to use token auth. Signed-off-by: David Wu --- registry/auth/auth.go | 4 +- registry/auth/htpasswd/access.go | 2 +- registry/auth/silly/access.go | 2 +- registry/auth/token/accesscontroller.go | 59 ++++++++++++++++--------- registry/handlers/app.go | 2 +- 5 files changed, 43 insertions(+), 26 deletions(-) diff --git a/registry/auth/auth.go b/registry/auth/auth.go index 91c7af3f..835eff73 100644 --- a/registry/auth/auth.go +++ b/registry/auth/auth.go @@ -21,7 +21,7 @@ // if ctx, err := accessController.Authorized(ctx, access); err != nil { // if challenge, ok := err.(auth.Challenge) { // // Let the challenge write the response. -// challenge.SetHeaders(w) +// challenge.SetHeaders(r, w) // w.WriteHeader(http.StatusUnauthorized) // return // } else { @@ -87,7 +87,7 @@ type Challenge interface { // adding the an HTTP challenge header on the response message. Callers // are expected to set the appropriate HTTP status code (e.g. 401) // themselves. - SetHeaders(w http.ResponseWriter) + SetHeaders(r *http.Request, w http.ResponseWriter) } // AccessController controls access to registry resources based on a request diff --git a/registry/auth/htpasswd/access.go b/registry/auth/htpasswd/access.go index eddf7ac3..2611a23b 100644 --- a/registry/auth/htpasswd/access.go +++ b/registry/auth/htpasswd/access.go @@ -111,7 +111,7 @@ type challenge struct { var _ auth.Challenge = challenge{} // SetHeaders sets the basic challenge header on the response. -func (ch challenge) SetHeaders(w http.ResponseWriter) { +func (ch challenge) SetHeaders(r *http.Request, w http.ResponseWriter) { w.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=%q", ch.realm)) } diff --git a/registry/auth/silly/access.go b/registry/auth/silly/access.go index f7bbe6e0..3ead560d 100644 --- a/registry/auth/silly/access.go +++ b/registry/auth/silly/access.go @@ -82,7 +82,7 @@ type challenge struct { var _ auth.Challenge = challenge{} // SetHeaders sets a simple bearer challenge on the response. -func (ch challenge) SetHeaders(w http.ResponseWriter) { +func (ch challenge) SetHeaders(r *http.Request, w http.ResponseWriter) { header := fmt.Sprintf("Bearer realm=%q,service=%q", ch.realm, ch.service) if ch.scope != "" { diff --git a/registry/auth/token/accesscontroller.go b/registry/auth/token/accesscontroller.go index 3086c2cf..33d18a48 100644 --- a/registry/auth/token/accesscontroller.go +++ b/registry/auth/token/accesscontroller.go @@ -76,10 +76,11 @@ var ( // authChallenge implements the auth.Challenge interface. type authChallenge struct { - err error - realm string - service string - accessSet accessSet + err error + realm string + autoRedirect bool + service string + accessSet accessSet } var _ auth.Challenge = authChallenge{} @@ -97,8 +98,14 @@ func (ac authChallenge) Status() int { // challengeParams constructs the value to be used in // the WWW-Authenticate response challenge header. 
// See https://tools.ietf.org/html/rfc6750#section-3 -func (ac authChallenge) challengeParams() string { - str := fmt.Sprintf("Bearer realm=%q,service=%q", ac.realm, ac.service) +func (ac authChallenge) challengeParams(r *http.Request) string { + var realm string + if ac.autoRedirect { + realm = fmt.Sprintf("https://%s/auth/token", r.Host) + } else { + realm = ac.realm + } + str := fmt.Sprintf("Bearer realm=%q,service=%q", realm, ac.service) if scope := ac.accessSet.scopeParam(); scope != "" { str = fmt.Sprintf("%s,scope=%q", str, scope) @@ -114,23 +121,25 @@ func (ac authChallenge) challengeParams() string { } // SetChallenge sets the WWW-Authenticate value for the response. -func (ac authChallenge) SetHeaders(w http.ResponseWriter) { - w.Header().Add("WWW-Authenticate", ac.challengeParams()) +func (ac authChallenge) SetHeaders(r *http.Request, w http.ResponseWriter) { + w.Header().Add("WWW-Authenticate", ac.challengeParams(r)) } // accessController implements the auth.AccessController interface. type accessController struct { - realm string - issuer string - service string - rootCerts *x509.CertPool - trustedKeys map[string]libtrust.PublicKey + realm string + autoRedirect bool + issuer string + service string + rootCerts *x509.CertPool + trustedKeys map[string]libtrust.PublicKey } // tokenAccessOptions is a convenience type for handling // options to the contstructor of an accessController. type tokenAccessOptions struct { realm string + autoRedirect bool issuer string service string rootCertBundle string @@ -153,6 +162,12 @@ func checkOptions(options map[string]interface{}) (tokenAccessOptions, error) { opts.realm, opts.issuer, opts.service, opts.rootCertBundle = vals[0], vals[1], vals[2], vals[3] + autoRedirect, ok := options["autoredirect"].(bool) + if !ok { + return opts, fmt.Errorf("token auth requires a valid option bool: autoredirect") + } + opts.autoRedirect = autoRedirect + return opts, nil } @@ -205,11 +220,12 @@ func newAccessController(options map[string]interface{}) (auth.AccessController, } return &accessController{ - realm: config.realm, - issuer: config.issuer, - service: config.service, - rootCerts: rootPool, - trustedKeys: trustedKeys, + realm: config.realm, + autoRedirect: config.autoRedirect, + issuer: config.issuer, + service: config.service, + rootCerts: rootPool, + trustedKeys: trustedKeys, }, nil } @@ -217,9 +233,10 @@ func newAccessController(options map[string]interface{}) (auth.AccessController, // for actions on resources described by the given access items. 
func (ac *accessController) Authorized(ctx context.Context, accessItems ...auth.Access) (context.Context, error) { challenge := &authChallenge{ - realm: ac.realm, - service: ac.service, - accessSet: newAccessSet(accessItems...), + realm: ac.realm, + autoRedirect: ac.autoRedirect, + service: ac.service, + accessSet: newAccessSet(accessItems...), } req, err := dcontext.GetRequest(ctx) diff --git a/registry/handlers/app.go b/registry/handlers/app.go index cf15a8fa..35c27df1 100644 --- a/registry/handlers/app.go +++ b/registry/handlers/app.go @@ -847,7 +847,7 @@ func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Cont switch err := err.(type) { case auth.Challenge: // Add the appropriate WWW-Auth header - err.SetHeaders(w) + err.SetHeaders(r, w) if err := errcode.ServeJSON(w, errcode.ErrorCodeUnauthorized.WithDetail(accessRecords)); err != nil { dcontext.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) From b2bd46576034ecc0223278e8c92cfddb40271b4b Mon Sep 17 00:00:00 2001 From: David Wu Date: Thu, 20 Sep 2018 14:53:34 -0700 Subject: [PATCH 168/276] fix checks Signed-off-by: David Wu --- contrib/token-server/main.go | 2 +- registry/auth/htpasswd/access_test.go | 2 +- registry/auth/silly/access_test.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/contrib/token-server/main.go b/contrib/token-server/main.go index 138793c7..8f9029ea 100644 --- a/contrib/token-server/main.go +++ b/contrib/token-server/main.go @@ -245,7 +245,7 @@ func (ts *tokenServer) getToken(ctx context.Context, w http.ResponseWriter, r *h // Get response context. ctx, w = dcontext.WithResponseWriter(ctx, w) - challenge.SetHeaders(w) + challenge.SetHeaders(r, w) handleError(ctx, errcode.ErrorCodeUnauthorized.WithDetail(challenge.Error()), w) dcontext.GetResponseLogger(ctx).Info("get token authentication challenge") diff --git a/registry/auth/htpasswd/access_test.go b/registry/auth/htpasswd/access_test.go index 7a3d411e..0bfc427e 100644 --- a/registry/auth/htpasswd/access_test.go +++ b/registry/auth/htpasswd/access_test.go @@ -50,7 +50,7 @@ func TestBasicAccessController(t *testing.T) { if err != nil { switch err := err.(type) { case auth.Challenge: - err.SetHeaders(w) + err.SetHeaders(r, w) w.WriteHeader(http.StatusUnauthorized) return default: diff --git a/registry/auth/silly/access_test.go b/registry/auth/silly/access_test.go index 0a5103e6..19824494 100644 --- a/registry/auth/silly/access_test.go +++ b/registry/auth/silly/access_test.go @@ -21,7 +21,7 @@ func TestSillyAccessController(t *testing.T) { if err != nil { switch err := err.(type) { case auth.Challenge: - err.SetHeaders(w) + err.SetHeaders(r, w) w.WriteHeader(http.StatusUnauthorized) return default: From 2e1e6307dd4346049fe68c90f8117855b8c00347 Mon Sep 17 00:00:00 2001 From: David Wu Date: Thu, 20 Sep 2018 18:54:57 -0700 Subject: [PATCH 169/276] add autoredirect to option Signed-off-by: David Wu --- registry/auth/token/token_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/registry/auth/token/token_test.go b/registry/auth/token/token_test.go index 03dce6fa..69f3e78b 100644 --- a/registry/auth/token/token_test.go +++ b/registry/auth/token/token_test.go @@ -333,6 +333,7 @@ func TestAccessController(t *testing.T) { "issuer": issuer, "service": service, "rootcertbundle": rootCertBundleFilename, + "autoredirect": false, } accessController, err := newAccessController(options) @@ -518,6 +519,7 @@ func TestNewAccessControllerPemBlock(t *testing.T) { "issuer": issuer, "service": 
service, "rootcertbundle": rootCertBundleFilename, + "autoredirect": false, } ac, err := newAccessController(options) From 569d18aef98df1f66de04cc56cfdb06b9f4337fa Mon Sep 17 00:00:00 2001 From: Rui Cao Date: Mon, 24 Sep 2018 09:05:44 +0800 Subject: [PATCH 170/276] Fix some typos Signed-off-by: Rui Cao --- registry/handlers/app.go | 4 ++-- registry/storage/cache/redis/redis_test.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/registry/handlers/app.go b/registry/handlers/app.go index cf15a8fa..ece0e52f 100644 --- a/registry/handlers/app.go +++ b/registry/handlers/app.go @@ -555,7 +555,7 @@ func (app *App) configureRedis(configuration *configuration.Configuration) { _, err := c.Do("PING") return err }, - Wait: false, // if a connection is not avialable, proceed without cache. + Wait: false, // if a connection is not available, proceed without cache. } app.redis = pool @@ -997,7 +997,7 @@ func applyStorageMiddleware(driver storagedriver.StorageDriver, middlewares []co // uploadPurgeDefaultConfig provides a default configuration for upload // purging to be used in the absence of configuration in the -// confifuration file +// configuration file func uploadPurgeDefaultConfig() map[interface{}]interface{} { config := map[interface{}]interface{}{} config["enabled"] = true diff --git a/registry/storage/cache/redis/redis_test.go b/registry/storage/cache/redis/redis_test.go index d324842d..03cc85f1 100644 --- a/registry/storage/cache/redis/redis_test.go +++ b/registry/storage/cache/redis/redis_test.go @@ -39,7 +39,7 @@ func TestRedisBlobDescriptorCacheProvider(t *testing.T) { _, err := c.Do("PING") return err }, - Wait: false, // if a connection is not avialable, proceed without cache. + Wait: false, // if a connection is not available, proceed without cache. 
} // Clear the database From 06a4c2f61b6acbe3bbf53f50982e81ababa6964e Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Thu, 20 Sep 2018 16:41:33 -0700 Subject: [PATCH 171/276] Update mailmap file Signed-off-by: Derek McGowan --- .mailmap | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/.mailmap b/.mailmap index d9910601..0f48321d 100644 --- a/.mailmap +++ b/.mailmap @@ -1,9 +1,9 @@ -Stephen J Day Stephen Day -Stephen J Day Stephen Day -Olivier Gambier Olivier Gambier -Brian Bland Brian Bland +Stephen J Day Stephen Day +Stephen J Day Stephen Day +Olivier Gambier Olivier Gambier +Brian Bland Brian Bland Brian Bland Brian Bland -Josh Hawn Josh Hawn +Josh Hawn Josh Hawn Richard Scothern Richard Richard Scothern Richard Scothern Andrew Meredith Andrew Meredith @@ -16,3 +16,17 @@ davidli davidli Omer Cohen Omer Cohen Eric Yang Eric Yang Nikita Tarasov Nikita +Yu Wang yuwaMSFT2 +Yu Wang Yu Wang (UC) +Olivier Gambier dmp +Olivier Gambier Olivier +Olivier Gambier Olivier +Elsan Li 李楠 elsanli(李楠) +Rui Cao ruicao +Gwendolynne Barr gbarr01 +Haibing Zhou 周海兵 zhouhaibing089 +Feng Honglin tifayuki +Helen Xie Helen-xie +Mike Brown Mike Brown +Manish Tomar Manish Tomar +Sakeven Jiang sakeven From 2eb7a17225801ae991919918716e7fabb56173f1 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Thu, 20 Sep 2018 16:41:45 -0700 Subject: [PATCH 172/276] Add 2.7.0-rc release notes Signed-off-by: Derek McGowan --- releases/v2.7.0-rc.toml | 48 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100644 releases/v2.7.0-rc.toml diff --git a/releases/v2.7.0-rc.toml b/releases/v2.7.0-rc.toml new file mode 100644 index 00000000..a0ec95aa --- /dev/null +++ b/releases/v2.7.0-rc.toml @@ -0,0 +1,48 @@ +# commit to be tagged for new release +commit = "HEAD" + +project_name = "registry" +github_repo = "docker/distribution" + +# previous release +previous = "v2.6.0" + +pre_release = true + +preface = """\ +The 2.7 registry release has been a long time coming and represents both +a long gap since the previous release and a renewed effort to release +regularly. The maintainers were committed to get OCI support into the +next release and thanks to much effort in the community that has +been accomplished. + +## OCI Support + +### Push and Pull of OCI Images + +The registry now allows pushing and pulling OCI images. OCI images will always +be preserved exactly without conversion to older types. With this change, +clients which implement OCI can feel comfortable creating OCI images as part of +their container image build process. + +## Specification Donation + +The Distribution specification which has had 4 years of review, implementation, +and production use is now part of OCI. As part of that move, specification +changes will no longer be accepted in the open source registry and should +instead go to [OCI's distribution-spec](https://github.com/opencontainers/distribution-spec/issues). + +## Bug fixes + +Many many fixes and improvements, see the change log below +""" + +# notable prs to include in the release notes, 1234 is the pr number +[notes] + +[breaking] + +[rename_deps] + [rename_deps.logrus] + old = "github.com/Sirupsen/logrus" + new = "github.com/sirupsen/logrus" From 97cb7f35b04c2b68931385852a2fbd31cbb54a92 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Thu, 20 Sep 2018 15:22:32 -0700 Subject: [PATCH 173/276] Update release documents Remove redundant AUTHORS file in favor of relying on .mailmap for keeping track of author names. 
Move older changelogs to release directory. Move release checklist to releases README. Signed-off-by: Derek McGowan --- AUTHORS | 182 ------------------ Makefile | 3 - RELEASE-CHECKLIST.md | 44 ----- releases/README.md | 46 +++++ releases/v2.5.0-changelog.txt | 33 ++++ CHANGELOG.md => releases/v2.6.0-changelog.txt | 35 ---- 6 files changed, 79 insertions(+), 264 deletions(-) delete mode 100644 AUTHORS delete mode 100644 RELEASE-CHECKLIST.md create mode 100644 releases/README.md create mode 100644 releases/v2.5.0-changelog.txt rename CHANGELOG.md => releases/v2.6.0-changelog.txt (74%) diff --git a/AUTHORS b/AUTHORS deleted file mode 100644 index 252ff8aa..00000000 --- a/AUTHORS +++ /dev/null @@ -1,182 +0,0 @@ -a-palchikov -Aaron Lehmann -Aaron Schlesinger -Aaron Vinson -Adam Duke -Adam Enger -Adrian Mouat -Ahmet Alp Balkan -Alex Chan -Alex Elman -Alexey Gladkov -allencloud -amitshukla -Amy Lindburg -Andrew Hsu -Andrew Meredith -Andrew T Nguyen -Andrey Kostov -Andy Goldstein -Anis Elleuch -Anton Tiurin -Antonio Mercado -Antonio Murdaca -Anusha Ragunathan -Arien Holthuizen -Arnaud Porterie -Arthur Baars -Asuka Suzuki -Avi Miller -Ayose Cazorla -BadZen -Ben Bodenmiller -Ben Firshman -bin liu -Brian Bland -burnettk -Carson A -Cezar Sa Espinola -Charles Smith -Chris Dillon -cuiwei13 -cyli -Daisuke Fujita -Daniel Huhn -Darren Shepherd -Dave Trombley -Dave Tucker -David Lawrence -David Verhasselt -David Xia -davidli -Dejan Golja -Derek McGowan -Diogo Mónica -DJ Enriquez -Donald Huang -Doug Davis -Edgar Lee -Eric Yang -Fabio Berchtold -Fabio Huser -farmerworking -Felix Yan -Florentin Raud -Frank Chen -Frederick F. Kautz IV -gabriell nascimento -Gleb Schukin -harche -Henri Gomez -Hu Keping -Hua Wang -HuKeping -Ian Babrou -igayoso -Jack Griffin -James Findley -Jason Freidman -Jason Heiss -Jeff Nickoloff -Jess Frazelle -Jessie Frazelle -jhaohai -Jianqing Wang -Jihoon Chung -Joao Fernandes -John Mulhausen -John Starks -Jon Johnson -Jon Poler -Jonathan Boulle -Jordan Liggitt -Josh Chorlton -Josh Hawn -Julien Fernandez -Ke Xu -Keerthan Mala -Kelsey Hightower -Kenneth Lim -Kenny Leung -Li Yi -Liu Hua -liuchang0812 -Lloyd Ramey -Louis Kottmann -Luke Carpenter -Marcus Martins -Mary Anthony -Matt Bentley -Matt Duch -Matt Moore -Matt Robenolt -Matthew Green -Michael Prokop -Michal Minar -Michal Minář -Mike Brown -Miquel Sabaté -Misty Stanley-Jones -Misty Stanley-Jones -Morgan Bauer -moxiegirl -Nathan Sullivan -nevermosby -Nghia Tran -Nikita Tarasov -Noah Treuhaft -Nuutti Kotivuori -Oilbeater -Olivier Gambier -Olivier Jacques -Omer Cohen -Patrick Devine -Phil Estes -Philip Misiowiec -Pierre-Yves Ritschard -Qiao Anran -Randy Barlow -Richard Scothern -Rodolfo Carvalho -Rusty Conover -Sean Boran -Sebastiaan van Stijn -Sebastien Coavoux -Serge Dubrouski -Sharif Nassar -Shawn Falkner-Horine -Shreyas Karnik -Simon Thulbourn -spacexnice -Spencer Rinehart -Stan Hu -Stefan Majewsky -Stefan Weil -Stephen J Day -Sungho Moon -Sven Dowideit -Sylvain Baubeau -Ted Reed -tgic -Thomas Sjögren -Tianon Gravi -Tibor Vass -Tonis Tiigi -Tony Holdstock-Brown -Trevor Pounds -Troels Thomsen -Victor Vieux -Victoria Bialas -Vincent Batts -Vincent Demeester -Vincent Giersch -W. 
Trevor King -weiyuan.yl -xg.song -xiekeyang -Yann ROBERT -yaoyao.xyy -yuexiao-wang -yuzou -zhouhaibing089 -姜继忠 diff --git a/Makefile b/Makefile index df89bb0c..4635c6ec 100644 --- a/Makefile +++ b/Makefile @@ -43,9 +43,6 @@ TESTFLAGS_PARALLEL ?= 8 all: binaries -AUTHORS: .mailmap .git/HEAD - git log --format='%aN <%aE>' | sort -fu > $@ - # This only needs to be generated by hand when cutting full releases. version/version.go: @echo "$(WHALE) $@" diff --git a/RELEASE-CHECKLIST.md b/RELEASE-CHECKLIST.md deleted file mode 100644 index 73eba5a8..00000000 --- a/RELEASE-CHECKLIST.md +++ /dev/null @@ -1,44 +0,0 @@ -## Registry Release Checklist - -10. Compile release notes detailing features and since the last release. - - Update the `CHANGELOG.md` file and create a PR to master with the updates. -Once that PR has been approved by maintainers the change may be cherry-picked -to the release branch (new release branches may be forked from this commit). - -20. Update the version file: `https://github.com/docker/distribution/blob/master/version/version.go` - -30. Update the `MAINTAINERS` (if necessary), `AUTHORS` and `.mailmap` files. - -``` -make AUTHORS -``` - -40. Create a signed tag. - - Distribution uses semantic versioning. Tags are of the format -`vx.y.z[-rcn]`. You will need PGP installed and a PGP key which has been added -to your Github account. The comment for the tag should include the release -notes, use previous tags as a guide for formatting consistently. Run -`git tag -s vx.y.z[-rcn]` to create tag and `git -v vx.y.z[-rcn]` to verify tag, -check comment and correct commit hash. - -50. Push the signed tag - -60. Create a new [release](https://github.com/docker/distribution/releases). In the case of a release candidate, tick the `pre-release` checkbox. - -70. Update the registry binary in [distribution library image repo](https://github.com/docker/distribution-library-image) by running the update script and opening a pull request. - -80. Update the official image. Add the new version in the [official images repo](https://github.com/docker-library/official-images) by appending a new version to the `registry/registry` file with the git hash pointed to by the signed tag. Update the major version to point to the latest version and the minor version to point to new patch release if necessary. -e.g. to release `2.3.1` - - `2.3.1 (new)` - - `2.3.0 -> 2.3.0` can be removed - - `2 -> 2.3.1` - - `2.3 -> 2.3.1` - -90. Build a new distribution/registry image on [Docker hub](https://hub.docker.com/u/distribution/dashboard) by adding a new automated build with the new tag and re-building the images. - diff --git a/releases/README.md b/releases/README.md new file mode 100644 index 00000000..7af9e97b --- /dev/null +++ b/releases/README.md @@ -0,0 +1,46 @@ +## Registry Release Checklist + +10. Compile release notes detailing features added since the last release. + + Add release template file to `releases/` directory. The template is defined +by containerd's release tool. Name the file using the version, for rc add +an `-rc` suffix. +See https://github.com/containerd/containerd/tree/master/cmd/containerd-release + +20. Update the `.mailmap` files. + +30. Update the version file: `https://github.com/docker/distribution/blob/master/version/version.go` + +40. Create a signed tag. + + Choose a tag for the next release, distribution uses semantic versioning +and expects tags to be formatted as `vx.y.z[-rc.n]`. 
Run the release tool using +the release template file and tag to generate the release notes for the tag +and Github release. To create the tag, you will need PGP installed and a PGP +key which has been added to your Github account. The comment for the tag will +be the generate release notes, always compare with previous tags to ensure +the output is expected and consistent. +Run `git tag --cleanup=whitespace -s vx.y.z[-rc.n] -F release-notes` to create +tag and `git -v vx.y.z[-rc.n]` to verify tag, check comment and correct commit +hash. + +50. Push the signed tag + +60. Create a new [release](https://github.com/docker/distribution/releases). +In the case of a release candidate, tick the `pre-release` checkbox. Use +the generate release notes from the release tool + +70. Update the registry binary in the [distribution library image repo](https://github.com/docker/distribution-library-image) by running the update script and opening a pull request. + +80. Update the official image. Add the new version in the [official images repo](https://github.com/docker-library/official-images) by appending a new version to the `registry/registry` file with the git hash pointed to by the signed tag. Update the major version to point to the latest version and the minor version to point to new patch release if necessary. +e.g. to release `2.3.1` + + `2.3.1 (new)` + + `2.3.0 -> 2.3.0` can be removed + + `2 -> 2.3.1` + + `2.3 -> 2.3.1` + +90. Build a new distribution/registry image on [Docker hub](https://hub.docker.com/u/distribution/dashboard) by adding a new automated build with the new tag and re-building the images. diff --git a/releases/v2.5.0-changelog.txt b/releases/v2.5.0-changelog.txt new file mode 100644 index 00000000..b021d052 --- /dev/null +++ b/releases/v2.5.0-changelog.txt @@ -0,0 +1,33 @@ +# Changelog + +## 2.5.0 (2016-06-14) + +#### Storage +- Ensure uploads directory is cleaned after upload is committed +- Add ability to cap concurrent operations in filesystem driver +- S3: Add 'us-gov-west-1' to the valid region list +- Swift: Handle ceph not returning Last-Modified header for HEAD requests +- Add redirect middleware + +#### Registry +- Add support for blobAccessController middleware +- Add support for layers from foreign sources +- Remove signature store +- Add support for Let's Encrypt +- Correct yaml key names in configuration + +#### Client +- Add option to get content digest from manifest get + +#### Spec +- Update the auth spec scope grammar to reflect the fact that hostnames are optionally supported +- Clarify API documentation around catalog fetch behavior + +#### API +- Support returning HTTP 429 (Too Many Requests) + +#### Documentation +- Update auth documentation examples to show "expires in" as int + +#### Docker Image +- Use Alpine Linux as base image diff --git a/CHANGELOG.md b/releases/v2.6.0-changelog.txt similarity index 74% rename from CHANGELOG.md rename to releases/v2.6.0-changelog.txt index e7b16b3c..b269beaa 100644 --- a/CHANGELOG.md +++ b/releases/v2.6.0-changelog.txt @@ -71,38 +71,3 @@ Documentation has moved to the documentation repository at `github.com/docker/docker.github.io/tree/master/registry` The registry is go 1.7 compliant, and passes newer, more restrictive `lint` and `vet` ing. 
- - -## 2.5.0 (2016-06-14) - -#### Storage -- Ensure uploads directory is cleaned after upload is committed -- Add ability to cap concurrent operations in filesystem driver -- S3: Add 'us-gov-west-1' to the valid region list -- Swift: Handle ceph not returning Last-Modified header for HEAD requests -- Add redirect middleware - -#### Registry -- Add support for blobAccessController middleware -- Add support for layers from foreign sources -- Remove signature store -- Add support for Let's Encrypt -- Correct yaml key names in configuration - -#### Client -- Add option to get content digest from manifest get - -#### Spec -- Update the auth spec scope grammar to reflect the fact that hostnames are optionally supported -- Clarify API documentation around catalog fetch behavior - -#### API -- Support returning HTTP 429 (Too Many Requests) - -#### Documentation -- Update auth documentation examples to show "expires in" as int - -#### Docker Image -- Use Alpine Linux as base image - - From de8636b78cc746ad92c3c43760adaa2a58057263 Mon Sep 17 00:00:00 2001 From: Yongxin Li Date: Thu, 27 Sep 2018 20:27:09 +0800 Subject: [PATCH 174/276] typo fix about overridden Signed-off-by: Yongxin Li --- registry/client/repository_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/registry/client/repository_test.go b/registry/client/repository_test.go index 4018a928..88e7e4b5 100644 --- a/registry/client/repository_test.go +++ b/registry/client/repository_test.go @@ -1336,7 +1336,7 @@ func TestSanitizeLocation(t *testing.T) { expected: "http://blahalaja.com/v2/foo/baasdf?_state=asdfasfdasdfasdf&digest=foo", }, { - description: "ensure new hostname overidden", + description: "ensure new hostname overridden", location: "https://mwhahaha.com/v2/foo/baasdf?_state=asdfasfdasdfasdf", source: "http://blahalaja.com/v1", expected: "https://mwhahaha.com/v2/foo/baasdf?_state=asdfasfdasdfasdf", From f08b3486c8eebefb8f81ed8ded64f6a5715bb017 Mon Sep 17 00:00:00 2001 From: Derek McGowan Date: Fri, 28 Sep 2018 16:07:56 -0700 Subject: [PATCH 175/276] Update version to 2.7.0-rc.0 Signed-off-by: Derek McGowan --- version/version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version/version.go b/version/version.go index 5954d95d..17755531 100644 --- a/version/version.go +++ b/version/version.go @@ -8,7 +8,7 @@ var Package = "github.com/docker/distribution" // the latest release tag by hand, always suffixed by "+unknown". During // build, it will be replaced by the actual version. The value here will be // used if the registry is run after a go get based install. -var Version = "v2.6.0+unknown" +var Version = "v2.7.0-rc.0+unknown" // Revision is filled with the VCS (e.g. git) revision being used to build // the program at linking time. From 6335cc258f052a5810d700c153e7f414808ec01c Mon Sep 17 00:00:00 2001 From: Rui Cao Date: Sat, 29 Sep 2018 15:12:10 +0800 Subject: [PATCH 176/276] Fix typo: commmand -> command Signed-off-by: Rui Cao --- contrib/compose/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/compose/README.md b/contrib/compose/README.md index 45050b70..cb2a81f8 100644 --- a/contrib/compose/README.md +++ b/contrib/compose/README.md @@ -70,7 +70,7 @@ to the 1.0 registry. Requests from newer clients will route to the 2.0 registry. Removing intermediate container edb84c2b40cb Successfully built 74acc70fa106 - The commmand outputs its progress until it completes. + The command outputs its progress until it completes. 4. 
Start your configuration with compose. From 8a800e1292c159acc84c024ee2668241b3fe6168 Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Mon, 15 Oct 2018 15:46:01 -0700 Subject: [PATCH 177/276] update Dockerfile to multi-stage Signed-off-by: Tonis Tiigi --- Dockerfile | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/Dockerfile b/Dockerfile index e0d74684..abf8d1a0 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.10-alpine +FROM golang:1.11-alpine AS build ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution ENV DOCKER_BUILDTAGS include_oss include_gcs @@ -7,14 +7,15 @@ ARG GOOS=linux ARG GOARCH=amd64 RUN set -ex \ - && apk add --no-cache make git + && apk add --no-cache make git file WORKDIR $DISTRIBUTION_DIR COPY . $DISTRIBUTION_DIR +RUN CGO_ENABLED=0 make PREFIX=/go clean binaries && file ./bin/registry | grep "statically linked" + +FROM alpine COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml - -RUN make PREFIX=/go clean binaries - +COPY --from=build /go/src/github.com/docker/distribution/bin/registry /bin/registry VOLUME ["/var/lib/registry"] EXPOSE 5000 ENTRYPOINT ["registry"] From cd1648d62cb75aa6f22512acaa985cd215a751aa Mon Sep 17 00:00:00 2001 From: Grachev Mikhail Date: Sun, 14 Oct 2018 22:01:57 +0300 Subject: [PATCH 178/276] Fix typo Signed-off-by: Mikhail Grachev --- contrib/docker-integration/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/docker-integration/README.md b/contrib/docker-integration/README.md index bc5be9d9..2b44e88c 100644 --- a/contrib/docker-integration/README.md +++ b/contrib/docker-integration/README.md @@ -35,7 +35,7 @@ the [release page](https://github.com/docker/golem/releases/tag/v0.1). #### Running golem with docker -Additionally golem can be run as a docker image requiring no additonal +Additionally golem can be run as a docker image requiring no additional installation. `docker run --privileged -v "$GOPATH/src/github.com/docker/distribution/contrib/docker-integration:/test" -w /test distribution/golem golem -rundaemon .` From 7c4d584e58eeb0195c7478353a1544d0a288ee49 Mon Sep 17 00:00:00 2001 From: Matt Tescher Date: Thu, 25 Oct 2018 14:38:26 -0700 Subject: [PATCH 179/276] add bugsnag logrus hook Signed-off-by: Matt Tescher --- registry/registry.go | 42 +++++++--- vendor.conf | 1 + .../github.com/Shopify/logrus-bugsnag/LICENSE | 21 +++++ .../Shopify/logrus-bugsnag/README.md | 24 ++++++ .../Shopify/logrus-bugsnag/bugsnag.go | 81 +++++++++++++++++++ 5 files changed, 156 insertions(+), 13 deletions(-) create mode 100644 vendor/github.com/Shopify/logrus-bugsnag/LICENSE create mode 100644 vendor/github.com/Shopify/logrus-bugsnag/README.md create mode 100644 vendor/github.com/Shopify/logrus-bugsnag/bugsnag.go diff --git a/registry/registry.go b/registry/registry.go index 44c0edf5..18698f5b 100644 --- a/registry/registry.go +++ b/registry/registry.go @@ -14,6 +14,7 @@ import ( "rsc.io/letsencrypt" + "github.com/Shopify/logrus-bugsnag" logstash "github.com/bshuster-repo/logrus-logstash-hook" "github.com/bugsnag/bugsnag-go" "github.com/docker/distribution/configuration" @@ -95,6 +96,8 @@ func NewRegistry(ctx context.Context, config *configuration.Configuration) (*Reg return nil, fmt.Errorf("error configuring logger: %v", err) } + configureBugsnag(config) + // inject a logger into the uuid library. warns us if there is a problem // with uuid generation under low entropy. 
uuid.Loggerf = dcontext.GetLogger(ctx).Warnf @@ -229,19 +232,6 @@ func configureReporting(app *handlers.App) http.Handler { var handler http.Handler = app if app.Config.Reporting.Bugsnag.APIKey != "" { - bugsnagConfig := bugsnag.Configuration{ - APIKey: app.Config.Reporting.Bugsnag.APIKey, - // TODO(brianbland): provide the registry version here - // AppVersion: "2.0", - } - if app.Config.Reporting.Bugsnag.ReleaseStage != "" { - bugsnagConfig.ReleaseStage = app.Config.Reporting.Bugsnag.ReleaseStage - } - if app.Config.Reporting.Bugsnag.Endpoint != "" { - bugsnagConfig.Endpoint = app.Config.Reporting.Bugsnag.Endpoint - } - bugsnag.Configure(bugsnagConfig) - handler = bugsnag.Handler(handler) } @@ -319,6 +309,32 @@ func logLevel(level configuration.Loglevel) log.Level { return l } +// configureBugsnag configures bugsnag reporting, if enabled +func configureBugsnag(config *configuration.Configuration) { + if config.Reporting.Bugsnag.APIKey == "" { + return + } + + bugsnagConfig := bugsnag.Configuration{ + APIKey: config.Reporting.Bugsnag.APIKey, + } + if config.Reporting.Bugsnag.ReleaseStage != "" { + bugsnagConfig.ReleaseStage = config.Reporting.Bugsnag.ReleaseStage + } + if config.Reporting.Bugsnag.Endpoint != "" { + bugsnagConfig.Endpoint = config.Reporting.Bugsnag.Endpoint + } + bugsnag.Configure(bugsnagConfig) + + // configure logrus bugsnag hook + hook, err := logrus_bugsnag.NewBugsnagHook() + if err != nil { + log.Fatalln(err) + } + + log.AddHook(hook) +} + // panicHandler add an HTTP handler to web app. The handler recover the happening // panic. logrus.Panic transmits panic message to pre-config log hooks, which is // defined in config.yml. diff --git a/vendor.conf b/vendor.conf index 7aec1d05..a249caf2 100644 --- a/vendor.conf +++ b/vendor.conf @@ -28,6 +28,7 @@ github.com/prometheus/client_golang c332b6f63c0658a65eca15c0e5247ded801cf564 github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c github.com/prometheus/common 89604d197083d4781071d3c65855d24ecfb0a563 github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd +github.com/Shopify/logrus-bugsnag 577dee27f20dd8f1a529f82210094af593be12bd github.com/spf13/cobra 312092086bed4968099259622145a0c9ae280064 github.com/spf13/pflag 5644820622454e71517561946e3d94b9f9db6842 github.com/xenolf/lego a9d8cec0e6563575e5868a005359ac97911b5985 diff --git a/vendor/github.com/Shopify/logrus-bugsnag/LICENSE b/vendor/github.com/Shopify/logrus-bugsnag/LICENSE new file mode 100644 index 00000000..a3f09f7f --- /dev/null +++ b/vendor/github.com/Shopify/logrus-bugsnag/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Shopify + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/Shopify/logrus-bugsnag/README.md b/vendor/github.com/Shopify/logrus-bugsnag/README.md new file mode 100644 index 00000000..6cbc79d4 --- /dev/null +++ b/vendor/github.com/Shopify/logrus-bugsnag/README.md @@ -0,0 +1,24 @@ +## logrus-bugsnag + +[![Build Status](https://travis-ci.org/Shopify/logrus-bugsnag.svg)](https://travis-ci.org/Shopify/logrus-bugsnag) + +logrus-bugsnag is a hook that allows [Logrus](https://github.com/sirupsen/logrus) to interface with [Bugsnag](https://bugsnag.com). + +#### Usage + +```go +import ( + log "github.com/sirupsen/logrus" + "github.com/Shopify/logrus-bugsnag" + bugsnag "github.com/bugsnag/bugsnag-go" +) + +func init() { + bugsnag.Configure(bugsnag.Configuration{ + APIKey: apiKey, + }) + hook, err := logrus_bugsnag.NewBugsnagHook() + logrus.StandardLogger().Hooks.Add(hook) +} +``` + diff --git a/vendor/github.com/Shopify/logrus-bugsnag/bugsnag.go b/vendor/github.com/Shopify/logrus-bugsnag/bugsnag.go new file mode 100644 index 00000000..31ee5a08 --- /dev/null +++ b/vendor/github.com/Shopify/logrus-bugsnag/bugsnag.go @@ -0,0 +1,81 @@ +package logrus_bugsnag + +import ( + "errors" + + "github.com/sirupsen/logrus" + "github.com/bugsnag/bugsnag-go" + bugsnag_errors "github.com/bugsnag/bugsnag-go/errors" +) + +type bugsnagHook struct{} + +// ErrBugsnagUnconfigured is returned if NewBugsnagHook is called before +// bugsnag.Configure. Bugsnag must be configured before the hook. +var ErrBugsnagUnconfigured = errors.New("bugsnag must be configured before installing this logrus hook") + +// ErrBugsnagSendFailed indicates that the hook failed to submit an error to +// bugsnag. The error was successfully generated, but `bugsnag.Notify()` +// failed. +type ErrBugsnagSendFailed struct { + err error +} + +func (e ErrBugsnagSendFailed) Error() string { + return "failed to send error to Bugsnag: " + e.err.Error() +} + +// NewBugsnagHook initializes a logrus hook which sends exceptions to an +// exception-tracking service compatible with the Bugsnag API. Before using +// this hook, you must call bugsnag.Configure(). The returned object should be +// registered with a log via `AddHook()` +// +// Entries that trigger an Error, Fatal or Panic should now include an "error" +// field to send to Bugsnag. +func NewBugsnagHook() (*bugsnagHook, error) { + if bugsnag.Config.APIKey == "" { + return nil, ErrBugsnagUnconfigured + } + return &bugsnagHook{}, nil +} + +// skipStackFrames skips logrus stack frames before logging to Bugsnag. +const skipStackFrames = 4 + +// Fire forwards an error to Bugsnag. Given a logrus.Entry, it extracts the +// "error" field (or the Message if the error isn't present) and sends it off. 
+func (hook *bugsnagHook) Fire(entry *logrus.Entry) error { + var notifyErr error + err, ok := entry.Data["error"].(error) + if ok { + notifyErr = err + } else { + notifyErr = errors.New(entry.Message) + } + + metadata := bugsnag.MetaData{} + metadata["metadata"] = make(map[string]interface{}) + for key, val := range entry.Data { + if key != "error" { + metadata["metadata"][key] = val + } + } + + errWithStack := bugsnag_errors.New(notifyErr, skipStackFrames) + bugsnagErr := bugsnag.Notify(errWithStack, metadata) + if bugsnagErr != nil { + return ErrBugsnagSendFailed{bugsnagErr} + } + + return nil +} + +// Levels enumerates the log levels on which the error should be forwarded to +// bugsnag: everything at or above the "Error" level. +func (hook *bugsnagHook) Levels() []logrus.Level { + return []logrus.Level{ + logrus.ErrorLevel, + logrus.FatalLevel, + logrus.PanicLevel, + } +} From 9ebf151ac2f72eaa597728390e10714cb99661a4 Mon Sep 17 00:00:00 2001 From: Manish Tomar Date: Mon, 29 Oct 2018 18:19:05 -0700 Subject: [PATCH 180/276] API to retrive tag's digests Add an interface alongside TagStore that provides API to retreive digests of all manifests that a tag historically pointed to. It also includes currently linked tag. Signed-off-by: Manish Tomar --- registry/storage/tagstore.go | 34 ++++++++++++ registry/storage/tagstore_test.go | 86 ++++++++++++++++++++++++++++++- tags.go | 9 ++++ 3 files changed, 128 insertions(+), 1 deletion(-) diff --git a/registry/storage/tagstore.go b/registry/storage/tagstore.go index f80b9628..e4b1ed15 100644 --- a/registry/storage/tagstore.go +++ b/registry/storage/tagstore.go @@ -196,3 +196,37 @@ func (ts *tagStore) Lookup(ctx context.Context, desc distribution.Descriptor) ([ return tags, nil } + +func (ts *tagStore) Indexes(ctx context.Context, tag string) ([]digest.Digest, error) { + var tagLinkPath = func(name string, dgst digest.Digest) (string, error) { + return pathFor(manifestTagIndexEntryLinkPathSpec{ + name: name, + tag: tag, + revision: dgst, + }) + } + lbs := &linkedBlobStore{ + blobStore: ts.blobStore, + blobAccessController: &linkedBlobStatter{ + blobStore: ts.blobStore, + repository: ts.repository, + linkPathFns: []linkPathFunc{manifestRevisionLinkPath}, + }, + repository: ts.repository, + ctx: ctx, + linkPathFns: []linkPathFunc{tagLinkPath}, + linkDirectoryPathSpec: manifestTagIndexPathSpec{ + name: ts.repository.Named().Name(), + tag: tag, + }, + } + var dgsts []digest.Digest + err := lbs.Enumerate(ctx, func(dgst digest.Digest) error { + dgsts = append(dgsts, dgst) + return nil + }) + if err != nil { + return nil, err + } + return dgsts, nil +} diff --git a/registry/storage/tagstore_test.go b/registry/storage/tagstore_test.go index 314fa433..938aa69d 100644 --- a/registry/storage/tagstore_test.go +++ b/registry/storage/tagstore_test.go @@ -2,15 +2,22 @@ package storage import ( "context" + "reflect" "testing" "github.com/docker/distribution" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/schema2" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/driver/inmemory" + digest "github.com/opencontainers/go-digest" ) type tagsTestEnv struct { ts distribution.TagService + bs distribution.BlobStore + ms distribution.ManifestService + gbs distribution.BlobStatter ctx context.Context } @@ -27,10 +34,17 @@ func testTagStore(t *testing.T) *tagsTestEnv { if err != nil { t.Fatal(err) } + ms, err := repo.Manifests(ctx) + if err != nil { + t.Fatal(err) + } return &tagsTestEnv{ ctx: 
ctx, ts: repo.Tags(ctx), + bs: repo.Blobs(ctx), + gbs: reg.BlobStatter(), + ms: ms, } } @@ -205,5 +219,75 @@ func TestTagLookup(t *testing.T) { if len(tags) != 2 { t.Errorf("Lookup of descB returned %d tags, expected 2", len(tags)) } - +} + +func TestTagIndexes(t *testing.T) { + env := testTagStore(t) + tagStore := env.ts + ctx := env.ctx + + indexes, ok := tagStore.(distribution.TagIndexes) + if !ok { + t.Fatal("tagStore does not implement TagIndexes interface") + } + + conf, err := env.bs.Put(ctx, "application/octet-stream", []byte{0}) + if err != nil { + t.Fatal(err) + } + + dgstsSet := make(map[digest.Digest]bool) + for i := 0; i < 3; i++ { + layer, err := env.bs.Put(ctx, "application/octet-stream", []byte{byte(i + 1)}) + if err != nil { + t.Fatal(err) + } + m := schema2.Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 2, + MediaType: schema2.MediaTypeManifest, + }, + Config: distribution.Descriptor{ + Digest: conf.Digest, + Size: 1, + MediaType: schema2.MediaTypeImageConfig, + }, + Layers: []distribution.Descriptor{ + { + Digest: layer.Digest, + Size: 1, + MediaType: schema2.MediaTypeLayer, + }, + }, + } + dm, err := schema2.FromStruct(m) + if err != nil { + t.Fatal(err) + } + dgst, err := env.ms.Put(ctx, dm) + if err != nil { + t.Fatal(err) + } + desc, err := env.gbs.Stat(ctx, dgst) + if err != nil { + t.Fatal(err) + } + err = tagStore.Tag(ctx, "t", desc) + if err != nil { + t.Fatal(err) + } + dgstsSet[dgst] = true + } + + gotDgsts, err := indexes.Indexes(ctx, "t") + if err != nil { + t.Fatal(err) + } + gotDgstsSet := make(map[digest.Digest]bool) + for _, dgst := range gotDgsts { + gotDgstsSet[dgst] = true + } + if !reflect.DeepEqual(dgstsSet, gotDgstsSet) { + t.Fatalf("Expected digests: %v but got digests: %v", dgstsSet, gotDgstsSet) + } } diff --git a/tags.go b/tags.go index f22df2b8..60b96b27 100644 --- a/tags.go +++ b/tags.go @@ -2,6 +2,8 @@ package distribution import ( "context" + + digest "github.com/opencontainers/go-digest" ) // TagService provides access to information about tagged objects. @@ -25,3 +27,10 @@ type TagService interface { // Lookup returns the set of tags referencing the given digest. Lookup(ctx context.Context, digest Descriptor) ([]string, error) } + +// TagIndexes proves method to retreive all the digests that a tag historically pointed to +type TagIndexes interface { + // Indexes returns set of digests that this tag historically pointed to. This also includes + // currently linked digest. 
There is no ordering guaranteed + Indexes(ctx context.Context, tag string) ([]digest.Digest, error) +} From 1251e51ad0d8bf24517010a259b8f56c31a60fb6 Mon Sep 17 00:00:00 2001 From: Manish Tomar Date: Thu, 1 Nov 2018 10:31:08 -0700 Subject: [PATCH 181/276] better name and updated tests - use ManifestDigests name instead of Indexes - update tests to validate against multiple tags Signed-off-by: Manish Tomar --- registry/storage/tagstore.go | 2 +- registry/storage/tagstore_test.go | 51 ++++++++++++++++++++++--------- tags.go | 11 ++++--- 3 files changed, 44 insertions(+), 20 deletions(-) diff --git a/registry/storage/tagstore.go b/registry/storage/tagstore.go index e4b1ed15..2e7aaca6 100644 --- a/registry/storage/tagstore.go +++ b/registry/storage/tagstore.go @@ -197,7 +197,7 @@ func (ts *tagStore) Lookup(ctx context.Context, desc distribution.Descriptor) ([ return tags, nil } -func (ts *tagStore) Indexes(ctx context.Context, tag string) ([]digest.Digest, error) { +func (ts *tagStore) ManifestDigests(ctx context.Context, tag string) ([]digest.Digest, error) { var tagLinkPath = func(name string, dgst digest.Digest) (string, error) { return pathFor(manifestTagIndexEntryLinkPathSpec{ name: name, diff --git a/registry/storage/tagstore_test.go b/registry/storage/tagstore_test.go index 938aa69d..6f4c6c91 100644 --- a/registry/storage/tagstore_test.go +++ b/registry/storage/tagstore_test.go @@ -226,9 +226,9 @@ func TestTagIndexes(t *testing.T) { tagStore := env.ts ctx := env.ctx - indexes, ok := tagStore.(distribution.TagIndexes) + md, ok := tagStore.(distribution.TagManifestsProvider) if !ok { - t.Fatal("tagStore does not implement TagIndexes interface") + t.Fatal("tagStore does not implement TagManifestDigests interface") } conf, err := env.bs.Put(ctx, "application/octet-stream", []byte{0}) @@ -236,8 +236,9 @@ func TestTagIndexes(t *testing.T) { t.Fatal(err) } - dgstsSet := make(map[digest.Digest]bool) - for i := 0; i < 3; i++ { + t1Dgsts := make(map[digest.Digest]struct{}) + t2Dgsts := make(map[digest.Digest]struct{}) + for i := 0; i < 5; i++ { layer, err := env.bs.Put(ctx, "application/octet-stream", []byte{byte(i + 1)}) if err != nil { t.Fatal(err) @@ -272,22 +273,44 @@ func TestTagIndexes(t *testing.T) { if err != nil { t.Fatal(err) } - err = tagStore.Tag(ctx, "t", desc) - if err != nil { - t.Fatal(err) + if i < 3 { + // tag first 3 manifests as "t1" + err = tagStore.Tag(ctx, "t1", desc) + if err != nil { + t.Fatal(err) + } + t1Dgsts[dgst] = struct{}{} + } else { + // the last two under "t2" + err = tagStore.Tag(ctx, "t2", desc) + if err != nil { + t.Fatal(err) + } + t2Dgsts[dgst] = struct{}{} } - dgstsSet[dgst] = true } - gotDgsts, err := indexes.Indexes(ctx, "t") + gotT1Dgsts, err := md.ManifestDigests(ctx, "t1") if err != nil { t.Fatal(err) } - gotDgstsSet := make(map[digest.Digest]bool) - for _, dgst := range gotDgsts { - gotDgstsSet[dgst] = true + if !reflect.DeepEqual(t1Dgsts, digestMap(gotT1Dgsts)) { + t.Fatalf("Expected digests: %v but got digests: %v", t1Dgsts, digestMap(gotT1Dgsts)) } - if !reflect.DeepEqual(dgstsSet, gotDgstsSet) { - t.Fatalf("Expected digests: %v but got digests: %v", dgstsSet, gotDgstsSet) + + gotT2Dgsts, err := md.ManifestDigests(ctx, "t2") + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(t2Dgsts, digestMap(gotT2Dgsts)) { + t.Fatalf("Expected digests: %v but got digests: %v", t2Dgsts, digestMap(gotT2Dgsts)) } } + +func digestMap(dgsts []digest.Digest) map[digest.Digest]struct{} { + set := make(map[digest.Digest]struct{}) + for _, dgst := range dgsts { 
+		set[dgst] = struct{}{}
+	}
+	return set
+}
diff --git a/tags.go b/tags.go
index 60b96b27..f0e0bea3 100644
--- a/tags.go
+++ b/tags.go
@@ -28,9 +28,10 @@
 	Lookup(ctx context.Context, digest Descriptor) ([]string, error)
 }
 
-// TagIndexes proves method to retreive all the digests that a tag historically pointed to
-type TagIndexes interface {
-	// Indexes returns set of digests that this tag historically pointed to. This also includes
-	// currently linked digest. There is no ordering guaranteed
-	Indexes(ctx context.Context, tag string) ([]digest.Digest, error)
+// TagManifestsProvider provides method to retreive the digests of manifests that a tag historically
+// pointed to
+type TagManifestsProvider interface {
+	// ManifestDigests returns set of digests that this tag historically pointed to. This also
+	// includes currently linked digest. There is no ordering guaranteed
+	ManifestDigests(ctx context.Context, tag string) ([]digest.Digest, error)
 }
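The interface introduced here is deliberately optional: `TagService` implementations are not required to track history, so callers discover the capability with a type assertion, exactly as the updated test above does. A small hypothetical consumer, assuming a `distribution.TagService` already obtained from a repository:

```go
package tagutil

import (
	"context"
	"fmt"

	"github.com/docker/distribution"
	digest "github.com/opencontainers/go-digest"
)

// historicalDigests returns every manifest digest the tag has pointed to,
// including the currently linked one, when the tag service supports it.
func historicalDigests(ctx context.Context, tags distribution.TagService, tag string) ([]digest.Digest, error) {
	provider, ok := tags.(distribution.TagManifestsProvider)
	if !ok {
		return nil, fmt.Errorf("tag service does not expose manifest history")
	}
	// Per the interface contract, no ordering of the digests is guaranteed.
	return provider.ManifestDigests(ctx, tag)
}
```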
From 63f6c1205d5d0de48001532489ea69bf23d8ee87 Mon Sep 17 00:00:00 2001
From: Derek McGowan
Date: Wed, 28 Nov 2018 10:40:29 -0800
Subject: [PATCH 182/276] Add GOARM flag to dockerfile

When building with arm on alpine, GOARM should be set to 6 by default.

Signed-off-by: Derek McGowan
---
 Dockerfile | 1 +
 1 file changed, 1 insertion(+)

diff --git a/Dockerfile b/Dockerfile
index abf8d1a0..612e62ce 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -5,6 +5,7 @@ ENV DOCKER_BUILDTAGS include_oss include_gcs
 
 ARG GOOS=linux
 ARG GOARCH=amd64
+ARG GOARM=6
 
 RUN set -ex \
     && apk add --no-cache make git file

From 08c6bbed05f011bd6424189bebbec1877dac3fa6 Mon Sep 17 00:00:00 2001
From: Ryan Abrams
Date: Thu, 29 Nov 2018 16:18:40 -0800
Subject: [PATCH 183/276] Release notes for 2.7

Signed-off-by: Ryan Abrams
---
 releases/{v2.7.0-rc.toml => v2.7.0.toml} | 36 ++++++++++++++++++++----
 version/version.go                       |  2 +-
 2 files changed, 32 insertions(+), 6 deletions(-)
 rename releases/{v2.7.0-rc.toml => v2.7.0.toml} (53%)

diff --git a/releases/v2.7.0-rc.toml b/releases/v2.7.0.toml
similarity index 53%
rename from releases/v2.7.0-rc.toml
rename to releases/v2.7.0.toml
index a0ec95aa..89bcbbd5 100644
--- a/releases/v2.7.0-rc.toml
+++ b/releases/v2.7.0.toml
@@ -7,7 +7,7 @@ github_repo = "docker/distribution"
 # previous release
 previous = "v2.6.0"
 
-pre_release = true
+pre_release = false
 
 preface = """\
 The 2.7 registry release has been a long time coming and represents both
@@ -25,17 +25,43 @@ be preserved exactly without conversion to older types. With this change,
 clients which implement OCI can feel comfortable creating OCI images as part of
 their container image build process.
 
-## Specification Donation
+### Specification Donation
 
 The Distribution specification which has had 4 years of review, implementation,
 and production use is now part of OCI. As part of that move, specification
 changes will no longer be accepted in the open source registry and should
 instead go to [OCI's distribution-spec](https://github.com/opencontainers/distribution-spec/issues).
 
-## Bug fixes
+## Bug Fixes and Improvements
 
-Many many fixes and improvements, see the change log below
-"""
+### General
+* Update Go version to 1.11
+* Switch to multi-stage Dockerfile
+* Validations enabled by default with new `disabled` config option
+* Optimize health check performance
+* Create separate permission for deleting objects in a repo
+* Fix storage driver error propagation for manifest GETs
+* Fix forwarded header resolution
+* Add prometheus metrics
+* Disable schema1 manifest by default (this affects docker versions `1.9` and older)
+* Graceful shutdown
+* TLS: remove ciphers that do not support perfect forward secrecy
+* Fix registry stripping newlines from manifests
+* Add bugsnag logrus hook
+* Support ARM builds
+
+### Storage Driver
+* OSS: fix current directory showing up in OSS driver.List()
+* Azure: fix race condition in PutContent()
+* Azure: update vendor
+* S3: update AWS SDK and use AWS SDK to validate regions
+* S3: remove expiration tag on multi-part uploads
+* S3: improve `Walk` performance
+* S3: allow bypassing cloudfront when in the same region
+* S3: remove s3-goamz driver in favor of s3-aws
+* Swift: update vendor
+
+See changelog below for full list of changes"""
 
 # notable prs to include in the release notes, 1234 is the pr number
 [notes]
diff --git a/version/version.go b/version/version.go
index 17755531..d05a377e 100644
--- a/version/version.go
+++ b/version/version.go
@@ -8,7 +8,7 @@ var Package = "github.com/docker/distribution"
 // the latest release tag by hand, always suffixed by "+unknown". During
 // build, it will be replaced by the actual version. The value here will be
 // used if the registry is run after a go get based install.
-var Version = "v2.7.0-rc.0+unknown"
+var Version = "v2.7.0+unknown"
 
 // Revision is filled with the VCS (e.g. git) revision being used to build
 // the program at linking time.
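A note on the `+unknown` suffix retained in `version/version.go`: as the comment in that file says, the compiled-in value is only a fallback that release builds replace at link time via the linker's `-X` flag. A rough, illustrative invocation of that mechanism (the project's actual `Makefile` flags may differ):

```sh
# Stamp the release version and revision at link time instead of shipping
# the "+unknown" fallback compiled into version/version.go (illustrative).
VERSION=v2.7.0
REVISION=$(git rev-parse HEAD)
go build -ldflags "-X github.com/docker/distribution/version.Version=$VERSION -X github.com/docker/distribution/version.Revision=$REVISION" ./cmd/registry
```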
From 90bed6712616f818479c40917dcd1bab0482346c Mon Sep 17 00:00:00 2001 From: Li Yi Date: Tue, 25 Dec 2018 08:30:40 +0800 Subject: [PATCH 184/276] Support BYOK for OSS storage driver Change-Id: I423ad03e63bd38aded3abfcba49079ff2fbb3b74 Signed-off-by: Li Yi --- registry/storage/driver/oss/oss.go | 45 +- registry/storage/driver/oss/oss_test.go | 2 + vendor.conf | 2 +- .../github.com/denverdino/aliyungo/README.md | 130 ++++-- .../denverdino/aliyungo/common/client.go | 419 +++++++++++++++++- .../denverdino/aliyungo/common/endpoint.go | 208 +++++++++ .../denverdino/aliyungo/common/regions.go | 54 ++- .../denverdino/aliyungo/common/request.go | 6 +- .../denverdino/aliyungo/common/types.go | 92 ++++ .../aliyungo/oss/authenticate_callback.go | 88 ++++ .../denverdino/aliyungo/oss/client.go | 43 +- .../denverdino/aliyungo/oss/regions.go | 22 +- .../denverdino/aliyungo/util/encoding.go | 203 ++++++++- .../denverdino/aliyungo/util/signature.go | 1 - .../denverdino/aliyungo/util/util.go | 11 + 15 files changed, 1216 insertions(+), 110 deletions(-) create mode 100644 vendor/github.com/denverdino/aliyungo/common/endpoint.go create mode 100644 vendor/github.com/denverdino/aliyungo/oss/authenticate_callback.go diff --git a/registry/storage/driver/oss/oss.go b/registry/storage/driver/oss/oss.go index 1dcf42b8..5cd96203 100644 --- a/registry/storage/driver/oss/oss.go +++ b/registry/storage/driver/oss/oss.go @@ -54,6 +54,7 @@ type DriverParameters struct { ChunkSize int64 RootDirectory string Endpoint string + EncryptionKeyID string } func init() { @@ -68,11 +69,12 @@ func (factory *ossDriverFactory) Create(parameters map[string]interface{}) (stor } type driver struct { - Client *oss.Client - Bucket *oss.Bucket - ChunkSize int64 - Encrypt bool - RootDirectory string + Client *oss.Client + Bucket *oss.Bucket + ChunkSize int64 + Encrypt bool + RootDirectory string + EncryptionKeyID string } type baseEmbed struct { @@ -132,6 +134,11 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { } } + encryptionKeyID, ok := parameters["encryptionkeyid"] + if !ok { + encryptionKeyID = "" + } + secureBool := true secure, ok := parameters["secure"] if ok { @@ -185,6 +192,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { Secure: secureBool, Internal: internalBool, Endpoint: fmt.Sprint(endpoint), + EncryptionKeyID: fmt.Sprint(encryptionKeyID), } return New(params) @@ -209,11 +217,12 @@ func New(params DriverParameters) (*Driver, error) { // if you initiated a new OSS client while another one is running on the same bucket. 
d := &driver{ - Client: client, - Bucket: bucket, - ChunkSize: params.ChunkSize, - Encrypt: params.Encrypt, - RootDirectory: params.RootDirectory, + Client: client, + Bucket: bucket, + ChunkSize: params.ChunkSize, + Encrypt: params.Encrypt, + RootDirectory: params.RootDirectory, + EncryptionKeyID: params.EncryptionKeyID, } return &Driver{ @@ -403,7 +412,7 @@ func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) e err := d.Bucket.CopyLargeFileInParallel(d.ossPath(sourcePath), d.ossPath(destPath), d.getContentType(), getPermissions(), - oss.Options{}, + d.getOptions(), maxConcurrency) if err != nil { logrus.Errorf("Failed for move from %s to %s: %v", d.ossPath(sourcePath), d.ossPath(destPath), err) @@ -503,7 +512,17 @@ func hasCode(err error, code string) bool { } func (d *driver) getOptions() oss.Options { - return oss.Options{ServerSideEncryption: d.Encrypt} + return oss.Options{ + ServerSideEncryption: d.Encrypt, + ServerSideEncryptionKeyID: d.EncryptionKeyID, + } +} + +func (d *driver) getCopyOptions() oss.CopyOptions { + return oss.CopyOptions{ + ServerSideEncryption: d.Encrypt, + ServerSideEncryptionKeyID: d.EncryptionKeyID, + } } func getPermissions() oss.ACL { @@ -580,7 +599,7 @@ func (w *writer) Write(p []byte) (int, error) { w.readyPart = contents } else { // Otherwise we can use the old file as the new first part - _, part, err := multi.PutPartCopy(1, oss.CopyOptions{}, w.driver.Bucket.Name+"/"+w.key) + _, part, err := multi.PutPartCopy(1, w.driver.getCopyOptions(), w.driver.Bucket.Name+"/"+w.key) if err != nil { return 0, err } diff --git a/registry/storage/driver/oss/oss_test.go b/registry/storage/driver/oss/oss_test.go index 438d9a48..d415115a 100644 --- a/registry/storage/driver/oss/oss_test.go +++ b/registry/storage/driver/oss/oss_test.go @@ -31,6 +31,7 @@ func init() { encrypt := os.Getenv("OSS_ENCRYPT") secure := os.Getenv("OSS_SECURE") endpoint := os.Getenv("OSS_ENDPOINT") + encryptionKeyID := os.Getenv("OSS_ENCRYPTIONKEYID") root, err := ioutil.TempDir("", "driver-") if err != nil { panic(err) @@ -73,6 +74,7 @@ func init() { Encrypt: encryptBool, Secure: secureBool, Endpoint: endpoint, + EncryptionKeyID: encryptionKeyID, } return New(parameters) diff --git a/vendor.conf b/vendor.conf index a249caf2..12f71672 100644 --- a/vendor.conf +++ b/vendor.conf @@ -7,7 +7,7 @@ github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 github.com/bugsnag/bugsnag-go b1d153021fcd90ca3f080db36bec96dc690fb274 github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702 github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782 -github.com/denverdino/aliyungo afedced274aa9a7fcdd47ac97018f0f8db4e5de2 +github.com/denverdino/aliyungo 6df11717a253d9c7d4141f9af4deaa7c580cd531 github.com/dgrijalva/jwt-go a601269ab70c205d26370c16f7c81e9017c14e04 github.com/docker/go-metrics 399ea8c73916000c64c2c76e8da00ca82f8387ab github.com/docker/libtrust fa567046d9b14f6aa788882a950d69651d230b21 diff --git a/vendor/github.com/denverdino/aliyungo/README.md b/vendor/github.com/denverdino/aliyungo/README.md index fd9da9b1..e0749c4e 100644 --- a/vendor/github.com/denverdino/aliyungo/README.md +++ b/vendor/github.com/denverdino/aliyungo/README.md @@ -1,25 +1,34 @@ # AliyunGo: Go SDK for Aliyun Services -This is an unofficial Go SDK for Aliyun Services. You are welcome for contribution. 
+[![Build Status](https://travis-ci.org/denverdino/aliyungo.svg?branch=master)](https://travis-ci.org/denverdino/aliyungo) [![CircleCI](https://circleci.com/gh/denverdino/aliyungo.svg?style=svg)](https://circleci.com/gh/denverdino/aliyungo) [![Go Report Card](https://goreportcard.com/badge/github.com/denverdino/aliyungo)](https://goreportcard.com/report/github.com/denverdino/aliyungo) +This is an unofficial Go SDK for Aliyun services. You are welcome for contribution. + +The official SDK for Aliyun services is published. Please visit https://github.com/aliyun/alibaba-cloud-sdk-go for details ## Package Structure -* ecs: [Elastic Compute Service](https://help.aliyun.com/document_detail/ecs/open-api/summary.html) -* oss: [Open Storage Service](https://help.aliyun.com/document_detail/oss/api-reference/abstract.html) -* slb: [Server Load Balancer](https://help.aliyun.com/document_detail/slb/api-reference/brief-introduction.html) -* dns: [DNS](https://help.aliyun.com/document_detail/dns/api-reference/summary.html) -* sls: [Logging Service](https://help.aliyun.com/document_detail/sls/api/overview.html) -* ram: [Resource Access Management](https://help.aliyun.com/document_detail/ram/ram-api-reference/intro/intro.html) -* rds: [Relational Database Service](https://help.aliyun.com/document_detail/26226.html) -* cms: [Cloud Monitor Service](https://help.aliyun.com/document_detail/28615.html) -* sts: [Security Token Service](https://help.aliyun.com/document_detail/28756.html) -* dm: [Direct Mail] -(https://help.aliyun.com/document_detail/29414.html) -* common: Common libary of Aliyun Go SDK -* util: Utility helpers - - +* cdn: [Content Delivery Network](https://help.aliyun.com/document_detail/27101.html) +* cms: [Cloud Monitor Service](https://help.aliyun.com/document_detail/28615.html) +* cs: [Container Service](https://help.aliyun.com/product/25972.html) +* dm: [Direct Mail](https://help.aliyun.com/document_detail/29414.html) +* dns: [DNS](https://help.aliyun.com/document_detail/dns/api-reference/summary.html) +* ecs: [Elastic Compute Service](https://help.aliyun.com/document_detail/ecs/open-api/summary.html) +* ess: [Auto Scaling](https://help.aliyun.com/document_detail/25857.html) +* mns: [Message Service](https://help.aliyun.com/document_detail/27414.html) +* mq: [Message Queue](https://help.aliyun.com/document_detail/29532.html) +* nas: [Network Attached Storage](https://help.aliyun.com/document_detail/27518.html) +* opensearch: [OpenSearch](https://help.aliyun.com/document_detail/29118.html) +* oss: [Open Storage Service](https://help.aliyun.com/document_detail/oss/api-reference/abstract.html) +* push: [Cloud Mobile Push](https://help.aliyun.com/document_detail/30049.html) +* rds: [Relational Database Service](https://help.aliyun.com/document_detail/26226.html) +* ram: [Resource Access Management](https://help.aliyun.com/document_detail/ram/ram-api-reference/intro/intro.html) +* slb: [Server Load Balancer](https://help.aliyun.com/document_detail/slb/api-reference/brief-introduction.html) +* sls: [Logging Service](https://help.aliyun.com/document_detail/sls/api/overview.html) +* sms: [Short Message Service](https://help.aliyun.com/product/44282.html) +* sts: [Security Token Service](https://help.aliyun.com/document_detail/28756.html) +* common: Common libary of Aliyun Go SDK +* util: Utility helpers ## Quick Start @@ -27,7 +36,7 @@ This is an unofficial Go SDK for Aliyun Services. 
You are welcome for contributi package main import ( - "fmt" + "fmt" "github.com/denverdino/aliyungo/ecs" ) @@ -44,17 +53,26 @@ func main() { ## Documentation - * ECS: [https://godoc.org/github.com/denverdino/aliyungo/ecs](https://godoc.org/github.com/denverdino/aliyungo/ecs) [![GoDoc](https://godoc.org/github.com/denverdino/aliyungo/ecs?status.svg)](https://godoc.org/github.com/denverdino/aliyungo/ecs) - * OSS: [https://godoc.org/github.com/denverdino/aliyungo/oss](https://godoc.org/github.com/denverdino/aliyungo/oss) [![GoDoc](https://godoc.org/github.com/denverdino/aliyungo/oss?status.svg)](https://godoc.org/github.com/denverdino/aliyungo/oss) - * SLB: [https://godoc.org/github.com/denverdino/aliyungo/slb](https://godoc.org/github.com/denverdino/aliyungo/slb) [![GoDoc](https://godoc.org/github.com/denverdino/aliyungo/slb?status.svg)](https://godoc.org/github.com/denverdino/aliyungo/slb) - * DNS: [https://godoc.org/github.com/denverdino/aliyungo/dns](https://godoc.org/github.com/denverdino/aliyungo/dns) [![GoDoc](https://godoc.org/github.com/denverdino/aliyungo/dns?status.svg)](https://godoc.org/github.com/denverdino/aliyungo/dns) - * SLS: [https://godoc.org/github.com/denverdino/aliyungo/sls](https://godoc.org/github.com/denverdino/aliyungo/sls) [![GoDoc](https://godoc.org/github.com/denverdino/aliyungo/sls?status.svg)](https://godoc.org/github.com/denverdino/aliyungo/sls) - * RAM: [https://godoc.org/github.com/denverdino/aliyungo/ram](https://godoc.org/github.com/denverdino/aliyungo/ram) [![GoDoc](https://godoc.org/github.com/denverdino/aliyungo/ram?status.svg)](https://godoc.org/github.com/denverdino/aliyungo/ram) - * RDS: [https://godoc.org/github.com/denverdino/aliyungo/rds](https://godoc.org/github.com/denverdino/aliyungo/rds) [![GoDoc](https://godoc.org/github.com/denverdino/aliyungo/rds?status.svg)](https://godoc.org/github.com/denverdino/aliyungo/rds) - * CMS: [https://godoc.org/github.com/denverdino/aliyungo/cms](https://godoc.org/github.com/denverdino/aliyungo/cms) [![GoDoc](https://godoc.org/github.com/denverdino/aliyungo/cms?status.svg)](https://godoc.org/github.com/denverdino/aliyungo/cms) - * STS: [https://godoc.org/github.com/denverdino/aliyungo/sts](https://godoc.org/github.com/denverdino/aliyungo/sts) [![GoDoc](https://godoc.org/github.com/denverdino/aliyungo/sts?status.svg)](https://godoc.org/github.com/denverdino/aliyungo/sts) - * DM: [https://godoc.org/github.com/denverdino/aliyungo/dm](https://godoc.org/github.com/denverdino/aliyungo/dm) [![GoDoc](https://godoc.org/github.com/denverdino/aliyungo/dm?status.svg)](https://godoc.org/github.com/denverdino/aliyungo/dm) - + * CDN: [https://godoc.org/github.com/denverdino/aliyungo/cdn](https://godoc.org/github.com/denverdino/aliyungo/cdn)[![GoDoc](https://godoc.org/github.com/denverdino/aliyungo/cdn?status.svg)](https://godoc.org/github.com/denverdino/aliyungo/cdn) + * CMS: [https://godoc.org/github.com/denverdino/aliyungo/cms](https://godoc.org/github.com/denverdino/aliyungo/cms) [![GoDoc](https://godoc.org/github.com/denverdino/aliyungo/cms?status.svg)](https://godoc.org/github.com/denverdino/aliyungo/cms) + * CS: [https://godoc.org/github.com/denverdino/aliyungo/cs](https://godoc.org/github.com/denverdino/aliyungo/cs) [![GoDoc](https://godoc.org/github.com/denverdino/aliyungo/cs?status.svg)](https://godoc.org/github.com/denverdino/aliyungo/cs) + * DM: [https://godoc.org/github.com/denverdino/aliyungo/dm](https://godoc.org/github.com/denverdino/aliyungo/dm) 
[![GoDoc](https://godoc.org/github.com/denverdino/aliyungo/dm?status.svg)](https://godoc.org/github.com/denverdino/aliyungo/dm) + * DNS: [https://godoc.org/github.com/denverdino/aliyungo/dns](https://godoc.org/github.com/denverdino/aliyungo/dns) [![GoDoc](https://godoc.org/github.com/denverdino/aliyungo/dns?status.svg)](https://godoc.org/github.com/denverdino/aliyungo/dns) + * ECS: [https://godoc.org/github.com/denverdino/aliyungo/ecs](https://godoc.org/github.com/denverdino/aliyungo/ecs) [![GoDoc](https://godoc.org/github.com/denverdino/aliyungo/ecs?status.svg)](https://godoc.org/github.com/denverdino/aliyungo/ecs) + * ESS: [https://godoc.org/github.com/denverdino/aliyungo/ess](https://godoc.org/github.com/denverdino/aliyungo/ess)[![GoDoc](https://godoc.org/github.com/denverdino/aliyungo/ess?status.svg)](https://godoc.org/github.com/denverdino/aliyungo/ess) + * MNS: [https://godoc.org/github.com/denverdino/aliyungo/mns](https://godoc.org/github.com/denverdino/aliyungo/mns)[![GoDoc](https://godoc.org/github.com/denverdino/aliyungo/mns?status.svg)](https://godoc.org/github.com/denverdino/aliyungo/mns) + * MQ: [https://godoc.org/github.com/denverdino/aliyungo/mq](https://godoc.org/github.com/denverdino/aliyungo/mq) [![GoDoc](https://godoc.org/github.com/denverdino/aliyungo/mq?status.svg)](https://godoc.org/github.com/denverdino/aliyungo/mq) + * NAS: [https://godoc.org/github.com/denverdino/aliyungo/nas](https://godoc.org/github.com/denverdino/aliyungo/nas) [![GoDoc](https://godoc.org/github.com/denverdino/aliyungo/nas?status.svg)](https://godoc.org/github.com/denverdino/aliyungo/nas) + * OPENSEARCH: [https://godoc.org/github.com/denverdino/aliyungo/opensearch](https://godoc.org/github.com/denverdino/aliyungo/opensearch) [![GoDoc](https://godoc.org/github.com/denverdino/aliyungo/opensearch?status.svg)](https://godoc.org/github.com/denverdino/aliyungo/opensearch) + * OSS: [https://godoc.org/github.com/denverdino/aliyungo/oss](https://godoc.org/github.com/denverdino/aliyungo/oss) [![GoDoc](https://godoc.org/github.com/denverdino/aliyungo/oss?status.svg)](https://godoc.org/github.com/denverdino/aliyungo/oss) + * PUSH: [https://godoc.org/github.com/denverdino/aliyungo/push](https://godoc.org/github.com/denverdino/aliyungo/push) [![GoDoc](https://godoc.org/github.com/denverdino/aliyungo/push?status.svg)](https://godoc.org/github.com/denverdino/aliyungo/push) + * RAM: [https://godoc.org/github.com/denverdino/aliyungo/ram](https://godoc.org/github.com/denverdino/aliyungo/ram) [![GoDoc](https://godoc.org/github.com/denverdino/aliyungo/ram?status.svg)](https://godoc.org/github.com/denverdino/aliyungo/ram) + * RDS: [https://godoc.org/github.com/denverdino/aliyungo/rds](https://godoc.org/github.com/denverdino/aliyungo/rds) [![GoDoc](https://godoc.org/github.com/denverdino/aliyungo/rds?status.svg)](https://godoc.org/github.com/denverdino/aliyungo/rds) + * SLB: [https://godoc.org/github.com/denverdino/aliyungo/slb](https://godoc.org/github.com/denverdino/aliyungo/slb) [![GoDoc](https://godoc.org/github.com/denverdino/aliyungo/slb?status.svg)](https://godoc.org/github.com/denverdino/aliyungo/slb) + * SLS: [https://godoc.org/github.com/denverdino/aliyungo/sls](https://godoc.org/github.com/denverdino/aliyungo/sls) [![GoDoc](https://godoc.org/github.com/denverdino/aliyungo/sls?status.svg)](https://godoc.org/github.com/denverdino/aliyungo/sls) + * SMS: [https://godoc.org/github.com/denverdino/aliyungo/sms](https://godoc.org/github.com/denverdino/aliyungo/sms) 
[![GoDoc](https://godoc.org/github.com/denverdino/aliyungo/sms?status.svg)](https://godoc.org/github.com/denverdino/aliyungo/sms) + * STS: [https://godoc.org/github.com/denverdino/aliyungo/sts](https://godoc.org/github.com/denverdino/aliyungo/sts) [![GoDoc](https://godoc.org/github.com/denverdino/aliyungo/sts?status.svg)](https://godoc.org/github.com/denverdino/aliyungo/sts) + ## Build and Install go get: @@ -63,10 +81,9 @@ go get: go get github.com/denverdino/aliyungo ``` - ## Test ECS -Modify "ecs/config_test.go" +Modify "ecs/config_test.go" ```sh TestAccessKeyId = "MY_ACCESS_KEY_ID" @@ -75,10 +92,10 @@ Modify "ecs/config_test.go" TestIAmRich = false ``` -* TestAccessKeyId: the Access Key Id -* TestAccessKeySecret: the Access Key Secret. -* TestInstanceId: the existing instance id for testing. It will be stopped and restarted during testing. -* TestIAmRich(Optional): If it is set to true, it will perform tests to create virtual machines and disks under your account. And you will pay the bill. :-) +* TestAccessKeyId: the Access Key Id +* TestAccessKeySecret: the Access Key Secret. +* TestInstanceId: the existing instance id for testing. It will be stopped and restarted during testing. +* TestIAmRich(Optional): If it is set to true, it will perform tests to create virtual machines and disks under your account. And you will pay the bill. :-) Under "ecs" and run @@ -88,7 +105,7 @@ go test ## Test OSS -Modify "oss/config_test.go" +Modify "oss/config_test.go" ```sh TestAccessKeyId = "MY_ACCESS_KEY_ID" @@ -97,11 +114,10 @@ Modify "oss/config_test.go" TestBucket = "denverdino" ``` -* TestAccessKeyId: the Access Key Id -* TestAccessKeySecret: the Access Key Secret. -* TestRegion: the region of OSS for testing -* TestBucket: the bucket name for testing - +* TestAccessKeyId: the Access Key Id +* TestAccessKeySecret: the Access Key Secret. +* TestRegion: the region of OSS for testing +* TestBucket: the bucket name for testing Under "oss" and run @@ -112,24 +128,47 @@ go test ## Contributors * Li Yi (denverdino@gmail.com) - * tgic (farmer1992@gmail.com) + * Boshi Lian (farmer1992@gmail.com) * Yu Zhou (oscarrr110@gmail.com) * Yufei Zhang * linuxlikerqq - * Changhai Yan (changhai.ych@alibaba-inc.com) + * Changhai Yan * Jizhong Jiang (jiangjizhong@gmail.com) * Kent Wang (pragkent@gmail.com) - * ringtail (zhongwei.lzw@alibaba-inc.com) + * ringtail * aiden0z (aiden0xz@gmail.com) * jimmycmh * menglingwei * mingang.he (dustgle@gmail.com) - * chainone (chainone@gmail.com) + * Young Chen (chainone@gmail.com) * johnzeng + * spacexnice (445436286@qq.com) + * xiaoheihero + * hmgle (dustgle@gmail.com) + * jzwlqx (jiangjizhong@gmail.com) + * Linhua Tan (toolchainX@gmail.com) + * Plutonist (p@vecsight.com) + * Bin Liu + * wangyue + * demonwy + * yarous224 + * yufeizyf (xazyf9111@sina.cn) + * keontang (ikeontang@gmail.com) + * Cholerae Hu (me@cholerae.com) + * Zach Bergh (berghzach@gmail.com) + * Bingshen Wang + * xiaozhu36 + * Russell (yufeiwu@gmail.com) + * zhuzhih2017 + * cheyang + * Hobo Chen + * Shuwei Yin + * Xujin Zheng (xujinzheng@gmail.com) + * Dino Lai (dinos80152@gmail.com) ## License -This project is licensed under the Apache License, Version 2.0. See [LICENSE](https://github.com/denverdino/aliyungo/blob/master/LICENSE.txt) for the full license text. +This project is licensed under the Apache License, Version 2.0. See [LICENSE](https://github.com/denverdino/aliyungo/blob/master/LICENSE.txt) for the full license text. 
## Related projects @@ -137,7 +176,6 @@ This project is licensed under the Apache License, Version 2.0. See [LICENSE](ht * Aliyun OSS driver for Docker Registry V2: [Pull request](https://github.com/docker/distribution/pull/514) - ## References The GO API design of OSS refer the implementation from [https://github.com/AdRoll/goamz](https://github.com/AdRoll) diff --git a/vendor/github.com/denverdino/aliyungo/common/client.go b/vendor/github.com/denverdino/aliyungo/common/client.go index 4ed0a06f..6c00d1c1 100644 --- a/vendor/github.com/denverdino/aliyungo/common/client.go +++ b/vendor/github.com/denverdino/aliyungo/common/client.go @@ -3,35 +3,206 @@ package common import ( "bytes" "encoding/json" + "errors" + "fmt" "io/ioutil" "log" "net/http" "net/url" + "os" + "strconv" + "strings" "time" "github.com/denverdino/aliyungo/util" ) +// RemovalPolicy.N add index to array item +// RemovalPolicy=["a", "b"] => RemovalPolicy.1="a" RemovalPolicy.2="b" +type FlattenArray []string + +// string contains underline which will be replaced with dot +// SystemDisk_Category => SystemDisk.Category +type UnderlineString string + // A Client represents a client of ECS services type Client struct { AccessKeyId string //Access Key Id AccessKeySecret string //Access Key Secret + securityToken string debug bool httpClient *http.Client endpoint string version string + serviceCode string + regionID Region + businessInfo string + userAgent string } -// NewClient creates a new instance of ECS client +// Initialize properties of a client instance func (client *Client) Init(endpoint, version, accessKeyId, accessKeySecret string) { client.AccessKeyId = accessKeyId - client.AccessKeySecret = accessKeySecret + "&" + ak := accessKeySecret + if !strings.HasSuffix(ak, "&") { + ak += "&" + } + client.AccessKeySecret = ak client.debug = false - client.httpClient = &http.Client{} + handshakeTimeout, err := strconv.Atoi(os.Getenv("TLSHandshakeTimeout")) + if err != nil { + handshakeTimeout = 0 + } + if handshakeTimeout == 0 { + client.httpClient = &http.Client{} + } else { + t := &http.Transport{ + TLSHandshakeTimeout: time.Duration(handshakeTimeout) * time.Second} + client.httpClient = &http.Client{Transport: t} + } client.endpoint = endpoint client.version = version } +// Initialize properties of a client instance including regionID +func (client *Client) NewInit(endpoint, version, accessKeyId, accessKeySecret, serviceCode string, regionID Region) { + client.Init(endpoint, version, accessKeyId, accessKeySecret) + client.serviceCode = serviceCode + client.regionID = regionID +} + +// Intialize client object when all properties are ready +func (client *Client) InitClient() *Client { + client.debug = false + handshakeTimeout, err := strconv.Atoi(os.Getenv("TLSHandshakeTimeout")) + if err != nil { + handshakeTimeout = 0 + } + if handshakeTimeout == 0 { + client.httpClient = &http.Client{} + } else { + t := &http.Transport{ + TLSHandshakeTimeout: time.Duration(handshakeTimeout) * time.Second} + client.httpClient = &http.Client{Transport: t} + } + return client +} + +func (client *Client) NewInitForAssumeRole(endpoint, version, accessKeyId, accessKeySecret, serviceCode string, regionID Region, securityToken string) { + client.NewInit(endpoint, version, accessKeyId, accessKeySecret, serviceCode, regionID) + client.securityToken = securityToken +} + +//getLocationEndpoint +func (client *Client) getEndpointByLocation() string { + locationClient := NewLocationClient(client.AccessKeyId, client.AccessKeySecret, client.securityToken) + 
locationClient.SetDebug(true) + return locationClient.DescribeOpenAPIEndpoint(client.regionID, client.serviceCode) +} + +//NewClient using location service +func (client *Client) setEndpointByLocation(region Region, serviceCode, accessKeyId, accessKeySecret, securityToken string) { + locationClient := NewLocationClient(accessKeyId, accessKeySecret, securityToken) + locationClient.SetDebug(true) + ep := locationClient.DescribeOpenAPIEndpoint(region, serviceCode) + if ep == "" { + ep = loadEndpointFromFile(region, serviceCode) + } + + if ep != "" { + client.endpoint = ep + } +} + +// Ensure all necessary properties are valid +func (client *Client) ensureProperties() error { + var msg string + + if client.endpoint == "" { + msg = fmt.Sprintf("endpoint cannot be empty!") + } else if client.version == "" { + msg = fmt.Sprintf("version cannot be empty!") + } else if client.AccessKeyId == "" { + msg = fmt.Sprintf("AccessKeyId cannot be empty!") + } else if client.AccessKeySecret == "" { + msg = fmt.Sprintf("AccessKeySecret cannot be empty!") + } + + if msg != "" { + return errors.New(msg) + } + + return nil +} + +// ---------------------------------------------------- +// WithXXX methods +// ---------------------------------------------------- + +// WithEndpoint sets custom endpoint +func (client *Client) WithEndpoint(endpoint string) *Client { + client.SetEndpoint(endpoint) + return client +} + +// WithVersion sets custom version +func (client *Client) WithVersion(version string) *Client { + client.SetVersion(version) + return client +} + +// WithRegionID sets Region ID +func (client *Client) WithRegionID(regionID Region) *Client { + client.SetRegionID(regionID) + return client +} + +//WithServiceCode sets serviceCode +func (client *Client) WithServiceCode(serviceCode string) *Client { + client.SetServiceCode(serviceCode) + return client +} + +// WithAccessKeyId sets new AccessKeyId +func (client *Client) WithAccessKeyId(id string) *Client { + client.SetAccessKeyId(id) + return client +} + +// WithAccessKeySecret sets new AccessKeySecret +func (client *Client) WithAccessKeySecret(secret string) *Client { + client.SetAccessKeySecret(secret) + return client +} + +// WithSecurityToken sets securityToken +func (client *Client) WithSecurityToken(securityToken string) *Client { + client.SetSecurityToken(securityToken) + return client +} + +// WithDebug sets debug mode to log the request/response message +func (client *Client) WithDebug(debug bool) *Client { + client.SetDebug(debug) + return client +} + +// WithBusinessInfo sets business info to log the request/response message +func (client *Client) WithBusinessInfo(businessInfo string) *Client { + client.SetBusinessInfo(businessInfo) + return client +} + +// WithUserAgent sets user agent to the request/response message +func (client *Client) WithUserAgent(userAgent string) *Client { + client.SetUserAgent(userAgent) + return client +} + +// ---------------------------------------------------- +// SetXXX methods +// ---------------------------------------------------- + // SetEndpoint sets custom endpoint func (client *Client) SetEndpoint(endpoint string) { client.endpoint = endpoint @@ -42,6 +213,16 @@ func (client *Client) SetVersion(version string) { client.version = version } +// SetEndpoint sets Region ID +func (client *Client) SetRegionID(regionID Region) { + client.regionID = regionID +} + +//SetServiceCode sets serviceCode +func (client *Client) SetServiceCode(serviceCode string) { + client.serviceCode = serviceCode +} + // SetAccessKeyId sets 
new AccessKeyId func (client *Client) SetAccessKeyId(id string) { client.AccessKeyId = id @@ -57,11 +238,55 @@ func (client *Client) SetDebug(debug bool) { client.debug = debug } +// SetBusinessInfo sets business info to log the request/response message +func (client *Client) SetBusinessInfo(businessInfo string) { + if strings.HasPrefix(businessInfo, "/") { + client.businessInfo = businessInfo + } else if businessInfo != "" { + client.businessInfo = "/" + businessInfo + } +} + +// SetUserAgent sets user agent to the request/response message +func (client *Client) SetUserAgent(userAgent string) { + client.userAgent = userAgent +} + +//set SecurityToken +func (client *Client) SetSecurityToken(securityToken string) { + client.securityToken = securityToken +} + +func (client *Client) initEndpoint() error { + // if set any value to "CUSTOMIZED_ENDPOINT" could skip location service. + // example: export CUSTOMIZED_ENDPOINT=true + if os.Getenv("CUSTOMIZED_ENDPOINT") != "" { + return nil + } + + if client.serviceCode != "" && client.regionID != "" { + endpoint := client.getEndpointByLocation() + if endpoint == "" { + return GetCustomError("InvalidEndpoint", "endpoint is empty,pls check") + } + client.endpoint = endpoint + } + return nil +} + // Invoke sends the raw HTTP request for ECS services func (client *Client) Invoke(action string, args interface{}, response interface{}) error { + if err := client.ensureProperties(); err != nil { + return err + } + + //init endpoint + if err := client.initEndpoint(); err != nil { + return err + } request := Request{} - request.init(client.version, action, client.AccessKeyId) + request.init(client.version, action, client.AccessKeyId, client.securityToken, client.regionID) query := util.ConvertToQueryValues(request) util.SetQueryValues(args, &query) @@ -74,13 +299,15 @@ func (client *Client) Invoke(action string, args interface{}, response interface httpReq, err := http.NewRequest(ECSRequestMethod, requestURL, nil) - // TODO move to util and add build val flag - httpReq.Header.Set("X-SDK-Client", `AliyunGO/`+Version) - if err != nil { return GetClientError(err) } + // TODO move to util and add build val flag + httpReq.Header.Set("X-SDK-Client", `AliyunGO/`+Version+client.businessInfo) + + httpReq.Header.Set("User-Agent", httpReq.UserAgent()+" "+client.userAgent) + t0 := time.Now() httpResp, err := client.httpClient.Do(httpReq) t1 := time.Now() @@ -125,6 +352,174 @@ func (client *Client) Invoke(action string, args interface{}, response interface return nil } +// Invoke sends the raw HTTP request for ECS services +func (client *Client) InvokeByFlattenMethod(action string, args interface{}, response interface{}) error { + if err := client.ensureProperties(); err != nil { + return err + } + + //init endpoint + if err := client.initEndpoint(); err != nil { + return err + } + + request := Request{} + request.init(client.version, action, client.AccessKeyId, client.securityToken, client.regionID) + + query := util.ConvertToQueryValues(request) + + util.SetQueryValueByFlattenMethod(args, &query) + + // Sign request + signature := util.CreateSignatureForRequest(ECSRequestMethod, &query, client.AccessKeySecret) + + // Generate the request URL + requestURL := client.endpoint + "?" 
+ query.Encode() + "&Signature=" + url.QueryEscape(signature) + + httpReq, err := http.NewRequest(ECSRequestMethod, requestURL, nil) + + if err != nil { + return GetClientError(err) + } + + // TODO move to util and add build val flag + httpReq.Header.Set("X-SDK-Client", `AliyunGO/`+Version+client.businessInfo) + + httpReq.Header.Set("User-Agent", httpReq.UserAgent()+" "+client.userAgent) + + t0 := time.Now() + httpResp, err := client.httpClient.Do(httpReq) + t1 := time.Now() + if err != nil { + return GetClientError(err) + } + statusCode := httpResp.StatusCode + + if client.debug { + log.Printf("Invoke %s %s %d (%v)", ECSRequestMethod, requestURL, statusCode, t1.Sub(t0)) + } + + defer httpResp.Body.Close() + body, err := ioutil.ReadAll(httpResp.Body) + + if err != nil { + return GetClientError(err) + } + + if client.debug { + var prettyJSON bytes.Buffer + err = json.Indent(&prettyJSON, body, "", " ") + log.Println(string(prettyJSON.Bytes())) + } + + if statusCode >= 400 && statusCode <= 599 { + errorResponse := ErrorResponse{} + err = json.Unmarshal(body, &errorResponse) + ecsError := &Error{ + ErrorResponse: errorResponse, + StatusCode: statusCode, + } + return ecsError + } + + err = json.Unmarshal(body, response) + //log.Printf("%++v", response) + if err != nil { + return GetClientError(err) + } + + return nil +} + +// Invoke sends the raw HTTP request for ECS services +//改进了一下上面那个方法,可以使用各种Http方法 +//2017.1.30 增加了一个path参数,用来拓展访问的地址 +func (client *Client) InvokeByAnyMethod(method, action, path string, args interface{}, response interface{}) error { + if err := client.ensureProperties(); err != nil { + return err + } + + //init endpoint + if err := client.initEndpoint(); err != nil { + return err + } + + request := Request{} + request.init(client.version, action, client.AccessKeyId, client.securityToken, client.regionID) + data := util.ConvertToQueryValues(request) + util.SetQueryValues(args, &data) + + // Sign request + signature := util.CreateSignatureForRequest(method, &data, client.AccessKeySecret) + + data.Add("Signature", signature) + // Generate the request URL + var ( + httpReq *http.Request + err error + ) + if method == http.MethodGet { + requestURL := client.endpoint + path + "?" 
+ data.Encode() + //fmt.Println(requestURL) + httpReq, err = http.NewRequest(method, requestURL, nil) + } else { + //fmt.Println(client.endpoint + path) + httpReq, err = http.NewRequest(method, client.endpoint+path, strings.NewReader(data.Encode())) + httpReq.Header.Set("Content-Type", "application/x-www-form-urlencoded") + } + + if err != nil { + return GetClientError(err) + } + + // TODO move to util and add build val flag + httpReq.Header.Set("X-SDK-Client", `AliyunGO/`+Version+client.businessInfo) + httpReq.Header.Set("User-Agent", httpReq.Header.Get("User-Agent")+" "+client.userAgent) + + t0 := time.Now() + httpResp, err := client.httpClient.Do(httpReq) + t1 := time.Now() + if err != nil { + return GetClientError(err) + } + statusCode := httpResp.StatusCode + + if client.debug { + log.Printf("Invoke %s %s %d (%v) %v", ECSRequestMethod, client.endpoint, statusCode, t1.Sub(t0), data.Encode()) + } + + defer httpResp.Body.Close() + body, err := ioutil.ReadAll(httpResp.Body) + + if err != nil { + return GetClientError(err) + } + + if client.debug { + var prettyJSON bytes.Buffer + err = json.Indent(&prettyJSON, body, "", " ") + log.Println(string(prettyJSON.Bytes())) + } + + if statusCode >= 400 && statusCode <= 599 { + errorResponse := ErrorResponse{} + err = json.Unmarshal(body, &errorResponse) + ecsError := &Error{ + ErrorResponse: errorResponse, + StatusCode: statusCode, + } + return ecsError + } + + err = json.Unmarshal(body, response) + //log.Printf("%++v", response) + if err != nil { + return GetClientError(err) + } + + return nil +} + // GenerateClientToken generates the Client Token with random string func (client *Client) GenerateClientToken() string { return util.CreateRandomString() @@ -143,3 +538,13 @@ func GetClientErrorFromString(str string) error { func GetClientError(err error) error { return GetClientErrorFromString(err.Error()) } + +func GetCustomError(code, message string) error { + return &Error{ + ErrorResponse: ErrorResponse{ + Code: code, + Message: message, + }, + StatusCode: 400, + } +} diff --git a/vendor/github.com/denverdino/aliyungo/common/endpoint.go b/vendor/github.com/denverdino/aliyungo/common/endpoint.go new file mode 100644 index 00000000..757f7a78 --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/common/endpoint.go @@ -0,0 +1,208 @@ +package common + +import ( + "encoding/xml" + "fmt" + "io/ioutil" + "log" + "os" + "strings" + "time" +) + +const ( + // LocationDefaultEndpoint is the default API endpoint of Location services + locationDefaultEndpoint = "https://location.aliyuncs.com" + locationAPIVersion = "2015-06-12" + HTTP_PROTOCOL = "http" + HTTPS_PROTOCOL = "https" +) + +var ( + endpoints = make(map[Region]map[string]string) + + SpecailEnpoints = map[Region]map[string]string{ + APNorthEast1: { + "ecs": "https://ecs.ap-northeast-1.aliyuncs.com", + "slb": "https://slb.ap-northeast-1.aliyuncs.com", + "rds": "https://rds.ap-northeast-1.aliyuncs.com", + "vpc": "https://vpc.ap-northeast-1.aliyuncs.com", + }, + APSouthEast2: { + "ecs": "https://ecs.ap-southeast-2.aliyuncs.com", + "slb": "https://slb.ap-southeast-2.aliyuncs.com", + "rds": "https://rds.ap-southeast-2.aliyuncs.com", + "vpc": "https://vpc.ap-southeast-2.aliyuncs.com", + }, + APSouthEast3: { + "ecs": "https://ecs.ap-southeast-3.aliyuncs.com", + "slb": "https://slb.ap-southeast-3.aliyuncs.com", + "rds": "https://rds.ap-southeast-3.aliyuncs.com", + "vpc": "https://vpc.ap-southeast-3.aliyuncs.com", + }, + MEEast1: { + "ecs": "https://ecs.me-east-1.aliyuncs.com", + "slb": 
"https://slb.me-east-1.aliyuncs.com", + "rds": "https://rds.me-east-1.aliyuncs.com", + "vpc": "https://vpc.me-east-1.aliyuncs.com", + }, + EUCentral1: { + "ecs": "https://ecs.eu-central-1.aliyuncs.com", + "slb": "https://slb.eu-central-1.aliyuncs.com", + "rds": "https://rds.eu-central-1.aliyuncs.com", + "vpc": "https://vpc.eu-central-1.aliyuncs.com", + }, + EUWest1: { + "ecs": "https://ecs.eu-west-1.aliyuncs.com", + "slb": "https://slb.eu-west-1.aliyuncs.com", + "rds": "https://rds.eu-west-1.aliyuncs.com", + "vpc": "https://vpc.eu-west-1.aliyuncs.com", + }, + Zhangjiakou: { + "ecs": "https://ecs.cn-zhangjiakou.aliyuncs.com", + "slb": "https://slb.cn-zhangjiakou.aliyuncs.com", + "rds": "https://rds.cn-zhangjiakou.aliyuncs.com", + "vpc": "https://vpc.cn-zhangjiakou.aliyuncs.com", + }, + Huhehaote: { + "ecs": "https://ecs.cn-huhehaote.aliyuncs.com", + "slb": "https://slb.cn-huhehaote.aliyuncs.com", + "rds": "https://rds.cn-huhehaote.aliyuncs.com", + "vpc": "https://vpc.cn-huhehaote.aliyuncs.com", + }, + } +) + +//init endpoints from file +func init() { + +} + +type LocationClient struct { + Client +} + +func NewLocationClient(accessKeyId, accessKeySecret, securityToken string) *LocationClient { + endpoint := os.Getenv("LOCATION_ENDPOINT") + if endpoint == "" { + endpoint = locationDefaultEndpoint + } + + client := &LocationClient{} + client.Init(endpoint, locationAPIVersion, accessKeyId, accessKeySecret) + client.securityToken = securityToken + return client +} + +func NewLocationClientWithSecurityToken(accessKeyId, accessKeySecret, securityToken string) *LocationClient { + endpoint := os.Getenv("LOCATION_ENDPOINT") + if endpoint == "" { + endpoint = locationDefaultEndpoint + } + + client := &LocationClient{} + client.WithEndpoint(endpoint). + WithVersion(locationAPIVersion). + WithAccessKeyId(accessKeyId). + WithAccessKeySecret(accessKeySecret). + WithSecurityToken(securityToken). 
+ InitClient() + return client +} + +func (client *LocationClient) DescribeEndpoint(args *DescribeEndpointArgs) (*DescribeEndpointResponse, error) { + response := &DescribeEndpointResponse{} + err := client.Invoke("DescribeEndpoint", args, response) + if err != nil { + return nil, err + } + return response, err +} + +func (client *LocationClient) DescribeEndpoints(args *DescribeEndpointsArgs) (*DescribeEndpointsResponse, error) { + response := &DescribeEndpointsResponse{} + err := client.Invoke("DescribeEndpoints", args, response) + if err != nil { + return nil, err + } + return response, err +} + +func getProductRegionEndpoint(region Region, serviceCode string) string { + if sp, ok := endpoints[region]; ok { + if endpoint, ok := sp[serviceCode]; ok { + return endpoint + } + } + + return "" +} + +func setProductRegionEndpoint(region Region, serviceCode string, endpoint string) { + endpoints[region] = map[string]string{ + serviceCode: endpoint, + } +} + +func (client *LocationClient) DescribeOpenAPIEndpoint(region Region, serviceCode string) string { + if endpoint := getProductRegionEndpoint(region, serviceCode); endpoint != "" { + return endpoint + } + defaultProtocols := HTTP_PROTOCOL + + args := &DescribeEndpointsArgs{ + Id: region, + ServiceCode: serviceCode, + Type: "openAPI", + } + + var endpoint *DescribeEndpointsResponse + var err error + for index := 0; index < 5; index++ { + endpoint, err = client.DescribeEndpoints(args) + if err == nil && endpoint != nil && len(endpoint.Endpoints.Endpoint) > 0 { + break + } + time.Sleep(500 * time.Millisecond) + } + + if err != nil || endpoint == nil || len(endpoint.Endpoints.Endpoint) <= 0 { + log.Printf("aliyungo: can not get endpoint from service, use default. endpoint=[%v], error=[%v]\n", endpoint, err) + return "" + } + + for _, protocol := range endpoint.Endpoints.Endpoint[0].Protocols.Protocols { + if strings.ToLower(protocol) == HTTPS_PROTOCOL { + defaultProtocols = HTTPS_PROTOCOL + break + } + } + + ep := fmt.Sprintf("%s://%s", defaultProtocols, endpoint.Endpoints.Endpoint[0].Endpoint) + + setProductRegionEndpoint(region, serviceCode, ep) + return ep +} + +func loadEndpointFromFile(region Region, serviceCode string) string { + data, err := ioutil.ReadFile("./endpoints.xml") + if err != nil { + return "" + } + var endpoints Endpoints + err = xml.Unmarshal(data, &endpoints) + if err != nil { + return "" + } + for _, endpoint := range endpoints.Endpoint { + if endpoint.RegionIds.RegionId == string(region) { + for _, product := range endpoint.Products.Product { + if strings.ToLower(product.ProductName) == serviceCode { + return fmt.Sprintf("%s://%s", HTTPS_PROTOCOL, product.DomainName) + } + } + } + } + + return "" +} diff --git a/vendor/github.com/denverdino/aliyungo/common/regions.go b/vendor/github.com/denverdino/aliyungo/common/regions.go index 9ceda465..e6bc728f 100644 --- a/vendor/github.com/denverdino/aliyungo/common/regions.go +++ b/vendor/github.com/denverdino/aliyungo/common/regions.go @@ -5,15 +5,51 @@ type Region string // Constants of region definition const ( - Hangzhou = Region("cn-hangzhou") - Qingdao = Region("cn-qingdao") - Beijing = Region("cn-beijing") - Hongkong = Region("cn-hongkong") - Shenzhen = Region("cn-shenzhen") - USWest1 = Region("us-west-1") - USEast1 = Region("us-east-1") + Hangzhou = Region("cn-hangzhou") + Qingdao = Region("cn-qingdao") + Beijing = Region("cn-beijing") + Hongkong = Region("cn-hongkong") + Shenzhen = Region("cn-shenzhen") + Shanghai = Region("cn-shanghai") + Zhangjiakou = 
Region("cn-zhangjiakou") + Huhehaote = Region("cn-huhehaote") + APSouthEast1 = Region("ap-southeast-1") - Shanghai = Region("cn-shanghai") + APNorthEast1 = Region("ap-northeast-1") + APSouthEast2 = Region("ap-southeast-2") + APSouthEast3 = Region("ap-southeast-3") + APSouthEast5 = Region("ap-southeast-5") + + APSouth1 = Region("ap-south-1") + + USWest1 = Region("us-west-1") + USEast1 = Region("us-east-1") + + MEEast1 = Region("me-east-1") + + EUCentral1 = Region("eu-central-1") + EUWest1 = Region("eu-west-1") + + ShenZhenFinance = Region("cn-shenzhen-finance-1") + ShanghaiFinance = Region("cn-shanghai-finance-1") ) -var ValidRegions = []Region{Hangzhou, Qingdao, Beijing, Shenzhen, Hongkong, Shanghai, USWest1, USEast1, APSouthEast1} +var ValidRegions = []Region{ + Hangzhou, Qingdao, Beijing, Shenzhen, Hongkong, Shanghai, Zhangjiakou, Huhehaote, + USWest1, USEast1, + APNorthEast1, APSouthEast1, APSouthEast2, APSouthEast3, APSouthEast5, + APSouth1, + MEEast1, + EUCentral1, EUWest1, + ShenZhenFinance, ShanghaiFinance, +} + +// IsValidRegion checks if r is an Ali supported region. +func IsValidRegion(r string) bool { + for _, v := range ValidRegions { + if r == string(v) { + return true + } + } + return false +} diff --git a/vendor/github.com/denverdino/aliyungo/common/request.go b/vendor/github.com/denverdino/aliyungo/common/request.go index 2a883f19..f35c2990 100644 --- a/vendor/github.com/denverdino/aliyungo/common/request.go +++ b/vendor/github.com/denverdino/aliyungo/common/request.go @@ -20,7 +20,9 @@ const ( type Request struct { Format string Version string + RegionId Region AccessKeyId string + SecurityToken string Signature string SignatureMethod string Timestamp util.ISO6801Time @@ -30,7 +32,7 @@ type Request struct { Action string } -func (request *Request) init(version string, action string, AccessKeyId string) { +func (request *Request) init(version string, action string, AccessKeyId string, securityToken string, regionId Region) { request.Format = JSONResponseFormat request.Timestamp = util.NewISO6801Time(time.Now().UTC()) request.Version = version @@ -39,6 +41,8 @@ func (request *Request) init(version string, action string, AccessKeyId string) request.SignatureNonce = util.CreateRandomString() request.Action = action request.AccessKeyId = AccessKeyId + request.SecurityToken = securityToken + request.RegionId = regionId } type Response struct { diff --git a/vendor/github.com/denverdino/aliyungo/common/types.go b/vendor/github.com/denverdino/aliyungo/common/types.go index c562aedf..cf161f11 100644 --- a/vendor/github.com/denverdino/aliyungo/common/types.go +++ b/vendor/github.com/denverdino/aliyungo/common/types.go @@ -13,3 +13,95 @@ const ( PrePaid = InstanceChargeType("PrePaid") PostPaid = InstanceChargeType("PostPaid") ) + +type DescribeEndpointArgs struct { + Id Region + ServiceCode string + Type string +} + +type EndpointItem struct { + Protocols struct { + Protocols []string + } + Type string + Namespace string + Id Region + SerivceCode string + Endpoint string +} + +type DescribeEndpointResponse struct { + Response + EndpointItem +} + +type DescribeEndpointsArgs struct { + Id Region + ServiceCode string + Type string +} + +type DescribeEndpointsResponse struct { + Response + Endpoints APIEndpoints + RequestId string + Success bool +} + +type APIEndpoints struct { + Endpoint []EndpointItem +} + +type NetType string + +const ( + Internet = NetType("Internet") + Intranet = NetType("Intranet") +) + +type TimeType string + +const ( + Hour = TimeType("Hour") + Day = 
TimeType("Day") + Week = TimeType("Week") + Month = TimeType("Month") + Year = TimeType("Year") +) + +type NetworkType string + +const ( + Classic = NetworkType("Classic") + VPC = NetworkType("VPC") +) + +type BusinessInfo struct { + Pack string `json:"pack,omitempty"` + ActivityId string `json:"activityId,omitempty"` +} + +//xml +type Endpoints struct { + Endpoint []Endpoint `xml:"Endpoint"` +} + +type Endpoint struct { + Name string `xml:"name,attr"` + RegionIds RegionIds `xml:"RegionIds"` + Products Products `xml:"Products"` +} + +type RegionIds struct { + RegionId string `xml:"RegionId"` +} + +type Products struct { + Product []Product `xml:"Product"` +} + +type Product struct { + ProductName string `xml:"ProductName"` + DomainName string `xml:"DomainName"` +} diff --git a/vendor/github.com/denverdino/aliyungo/oss/authenticate_callback.go b/vendor/github.com/denverdino/aliyungo/oss/authenticate_callback.go new file mode 100644 index 00000000..74b58fb0 --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/oss/authenticate_callback.go @@ -0,0 +1,88 @@ +package oss + +import ( + "crypto" + "crypto/md5" + "crypto/rsa" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "errors" + "io/ioutil" + "net/http" + "regexp" + "strings" + "sync" +) + +type authenticationType struct { + lock *sync.RWMutex + certificate map[string]*rsa.PublicKey +} + +var ( + authentication = authenticationType{lock: &sync.RWMutex{}, certificate: map[string]*rsa.PublicKey{}} + urlReg = regexp.MustCompile(`^http(|s)://gosspublic.alicdn.com/[0-9a-zA-Z]`) +) + +//验证OSS向业务服务器发来的回调函数。 +//该方法是并发安全的 +//pubKeyUrl 回调请求头中[x-oss-pub-key-url]一项,以Base64编码 +//reqUrl oss所发来请求的url,由path+query组成 +//reqBody oss所发来请求的body +//authorization authorization为回调头中的签名 +func AuthenticateCallBack(pubKeyUrl, reqUrl, reqBody, authorization string) error { + //获取证书url + keyURL, err := base64.URLEncoding.DecodeString(pubKeyUrl) + if err != nil { + return err + } + url := string(keyURL) + //判断证书是否来自于阿里云 + if !urlReg.Match(keyURL) { + return errors.New("certificate address error") + } + //获取文件名 + rs := []rune(url) + filename := string(rs[strings.LastIndex(url, "/") : len(rs)-1]) + authentication.lock.RLock() + certificate := authentication.certificate[filename] + authentication.lock.RUnlock() + //内存中没有证书,下载 + if certificate == nil { + authentication.lock.Lock() + res, err := http.Get(url) + if err != nil { + return err + } + defer res.Body.Close() + body, err := ioutil.ReadAll(res.Body) + if err != nil { + return err + } + block, _ := pem.Decode(body) + if block == nil { + return errors.New("certificate error") + } + pubKey, err := x509.ParsePKIXPublicKey(block.Bytes) + if err != nil { + return err + } + certificate = pubKey.(*rsa.PublicKey) + authentication.certificate[filename] = certificate + authentication.lock.Unlock() + } + //证书准备完毕,开始验证 + //解析签名 + signature, err := base64.StdEncoding.DecodeString(authorization) + if err != nil { + return err + } + hashed := md5.New() + hashed.Write([]byte(reqUrl + "\n" + reqBody)) + if err := rsa.VerifyPKCS1v15(certificate, crypto.MD5, hashed.Sum(nil), signature); err != nil { + return err + } + //验证通过 + return nil +} diff --git a/vendor/github.com/denverdino/aliyungo/oss/client.go b/vendor/github.com/denverdino/aliyungo/oss/client.go index c5e13e51..7b5a55b8 100644 --- a/vendor/github.com/denverdino/aliyungo/oss/client.go +++ b/vendor/github.com/denverdino/aliyungo/oss/client.go @@ -57,12 +57,14 @@ type Owner struct { // Options struct // type Options struct { - ServerSideEncryption bool - Meta 
map[string][]string - ContentEncoding string - CacheControl string - ContentMD5 string - ContentDisposition string + ServerSideEncryption bool + ServerSideEncryptionKeyID string + + Meta map[string][]string + ContentEncoding string + CacheControl string + ContentMD5 string + ContentDisposition string //Range string //Expires int } @@ -72,6 +74,9 @@ type CopyOptions struct { CopySourceOptions string MetadataDirective string //ContentType string + + ServerSideEncryption bool + ServerSideEncryptionKeyID string } // CopyObjectResult is the output from a Copy request @@ -366,7 +371,7 @@ func (b *Bucket) Exists(path string) (exists bool, err error) { } if err != nil { - // We can treat a 403 or 404 as non existance + // We can treat a 403 or 404 as non existence if e, ok := err.(*Error); ok && (e.StatusCode == 403 || e.StatusCode == 404) { return false, nil } @@ -430,7 +435,7 @@ func (b *Bucket) Put(path string, data []byte, contType string, perm ACL, option func (b *Bucket) PutCopy(path string, perm ACL, options CopyOptions, source string) (*CopyObjectResult, error) { headers := make(http.Header) - headers.Set("x-oss-acl", string(perm)) + headers.Set("x-oss-object-acl", string(perm)) headers.Set("x-oss-copy-source", source) options.addHeaders(headers) @@ -455,7 +460,7 @@ func (b *Bucket) PutReader(path string, r io.Reader, length int64, contType stri headers := make(http.Header) headers.Set("Content-Length", strconv.FormatInt(length, 10)) headers.Set("Content-Type", contType) - headers.Set("x-oss-acl", string(perm)) + headers.Set("x-oss-object-acl", string(perm)) options.addHeaders(headers) req := &request{ @@ -491,7 +496,10 @@ func (b *Bucket) PutFile(path string, file *os.File, perm ACL, options Options) // addHeaders adds o's specified fields to headers func (o Options) addHeaders(headers http.Header) { - if o.ServerSideEncryption { + if len(o.ServerSideEncryptionKeyID) != 0 { + headers.Set("x-oss-server-side-encryption", "KMS") + headers.Set("x-oss-server-side-encryption-key-id", o.ServerSideEncryptionKeyID) + } else if o.ServerSideEncryption { headers.Set("x-oss-server-side-encryption", "AES256") } if len(o.ContentEncoding) != 0 { @@ -516,6 +524,13 @@ func (o Options) addHeaders(headers http.Header) { // addHeaders adds o's specified fields to headers func (o CopyOptions) addHeaders(headers http.Header) { + if len(o.ServerSideEncryptionKeyID) != 0 { + headers.Set("x-oss-server-side-encryption", "KMS") + headers.Set("x-oss-server-side-encryption-key-id", o.ServerSideEncryptionKeyID) + } else if o.ServerSideEncryption { + headers.Set("x-oss-server-side-encryption", "AES256") + } + if len(o.MetadataDirective) != 0 { headers.Set("x-oss-metadata-directive", o.MetadataDirective) } @@ -1100,7 +1115,7 @@ func (client *Client) setupHttpRequest(req *request) (*http.Request, error) { // body will be unmarshalled on it. 
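+// Note that wire-level request/response logging is now gated on client.debug
+// (previously it ran unconditionally via `if true`), so traces only appear
+// after SetDebug(true) has been called on the client.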
func (client *Client) doHttpRequest(c *http.Client, hreq *http.Request, resp interface{}) (*http.Response, error) { - if true { + if client.debug { log.Printf("%s %s ...\n", hreq.Method, hreq.URL.String()) } hresp, err := c.Do(hreq) @@ -1323,9 +1338,9 @@ func (b *Bucket) CopyLargeFileInParallel(sourcePath string, destPath string, con } currentLength, err := b.GetContentLength(sourcePath) - - log.Printf("Parallel Copy large file[size: %d] from %s to %s\n",currentLength, sourcePath, destPath) - + + log.Printf("Parallel Copy large file[size: %d] from %s to %s\n", currentLength, sourcePath, destPath) + if err != nil { return err } diff --git a/vendor/github.com/denverdino/aliyungo/oss/regions.go b/vendor/github.com/denverdino/aliyungo/oss/regions.go index 0f250604..89bac838 100644 --- a/vendor/github.com/denverdino/aliyungo/oss/regions.go +++ b/vendor/github.com/denverdino/aliyungo/oss/regions.go @@ -9,15 +9,25 @@ type Region string // Constants of region definition const ( - Hangzhou = Region("oss-cn-hangzhou") - Qingdao = Region("oss-cn-qingdao") - Beijing = Region("oss-cn-beijing") - Hongkong = Region("oss-cn-hongkong") - Shenzhen = Region("oss-cn-shenzhen") + Hangzhou = Region("oss-cn-hangzhou") + Qingdao = Region("oss-cn-qingdao") + Beijing = Region("oss-cn-beijing") + Hongkong = Region("oss-cn-hongkong") + Shenzhen = Region("oss-cn-shenzhen") + Shanghai = Region("oss-cn-shanghai") + Zhangjiakou = Region("oss-cn-zhangjiakou") + Huhehaote = Region("oss-cn-huhehaote") + USWest1 = Region("oss-us-west-1") USEast1 = Region("oss-us-east-1") APSouthEast1 = Region("oss-ap-southeast-1") - Shanghai = Region("oss-cn-shanghai") + APNorthEast1 = Region("oss-ap-northeast-1") + APSouthEast2 = Region("oss-ap-southeast-2") + + MEEast1 = Region("oss-me-east-1") + + EUCentral1 = Region("oss-eu-central-1") + EUWest1 = Region("oss-eu-west-1") DefaultRegion = Hangzhou ) diff --git a/vendor/github.com/denverdino/aliyungo/util/encoding.go b/vendor/github.com/denverdino/aliyungo/util/encoding.go index e545e069..ec1a5b13 100644 --- a/vendor/github.com/denverdino/aliyungo/util/encoding.go +++ b/vendor/github.com/denverdino/aliyungo/util/encoding.go @@ -7,9 +7,26 @@ import ( "net/url" "reflect" "strconv" + "strings" "time" ) +// change instance=["a", "b"] +// to instance.1="a" instance.2="b" +func FlattenFn(fieldName string, field reflect.Value, values *url.Values) { + l := field.Len() + if l > 0 { + for i := 0; i < l; i++ { + str := field.Index(i).String() + values.Set(fieldName+"."+strconv.Itoa(i+1), str) + } + } +} + +func Underline2Dot(name string) string { + return strings.Replace(name, "_", ".", -1) +} + //ConvertToQueryValues converts the struct to url.Values func ConvertToQueryValues(ifc interface{}) url.Values { values := url.Values{} @@ -22,6 +39,10 @@ func SetQueryValues(ifc interface{}, values *url.Values) { setQueryValues(ifc, values, "") } +func SetQueryValueByFlattenMethod(ifc interface{}, values *url.Values) { + setQueryValuesByFlattenMethod(ifc, values, "") +} + func setQueryValues(i interface{}, values *url.Values, prefix string) { // add to support url.Values mapValues, ok := i.(url.Values) @@ -41,28 +62,32 @@ func setQueryValues(i interface{}, values *url.Values, prefix string) { fieldName := elemType.Field(i).Name anonymous := elemType.Field(i).Anonymous + tag := elemType.Field(i).Tag.Get("query") + argName := elemType.Field(i).Tag.Get("ArgName") field := elem.Field(i) // TODO Use Tag for validation // tag := typ.Field(i).Tag.Get("tagname") kind := field.Kind() + isPtr := false if (kind == 
reflect.Ptr || kind == reflect.Array || kind == reflect.Slice || kind == reflect.Map || kind == reflect.Chan) && field.IsNil() { continue } if kind == reflect.Ptr { field = field.Elem() kind = field.Kind() + isPtr = true } var value string //switch field.Interface().(type) { switch kind { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: i := field.Int() - if i != 0 { + if i != 0 || isPtr { value = strconv.FormatInt(i, 10) } case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: i := field.Uint() - if i != 0 { + if i != 0 || isPtr { value = strconv.FormatUint(i, 10) } case reflect.Float32: @@ -93,15 +118,26 @@ func setQueryValues(i interface{}, values *url.Values, prefix string) { case reflect.String: l := field.Len() if l > 0 { - strArray := make([]string, l) - for i := 0; i < l; i++ { - strArray[i] = field.Index(i).String() - } - bytes, err := json.Marshal(strArray) - if err == nil { - value = string(bytes) + if tag == "list" { + name := argName + if argName == "" { + name = fieldName + } + for i := 0; i < l; i++ { + valueName := fmt.Sprintf("%s.%d", name, (i + 1)) + values.Set(valueName, field.Index(i).String()) + } } else { - log.Printf("Failed to convert JSON: %v", err) + strArray := make([]string, l) + for i := 0; i < l; i++ { + strArray[i] = field.Index(i).String() + } + bytes, err := json.Marshal(strArray) + if err == nil { + value = string(bytes) + } else { + log.Printf("Failed to convert JSON: %v", err) + } } } default: @@ -139,8 +175,8 @@ func setQueryValues(i interface{}, values *url.Values, prefix string) { } } if value != "" { - name := elemType.Field(i).Tag.Get("ArgName") - if name == "" { + name := argName + if argName == "" { name = fieldName } if prefix != "" { @@ -150,3 +186,146 @@ func setQueryValues(i interface{}, values *url.Values, prefix string) { } } } + +func setQueryValuesByFlattenMethod(i interface{}, values *url.Values, prefix string) { + // add to support url.Values + mapValues, ok := i.(url.Values) + if ok { + for k, _ := range mapValues { + values.Set(k, mapValues.Get(k)) + } + return + } + + elem := reflect.ValueOf(i) + if elem.Kind() == reflect.Ptr { + elem = elem.Elem() + } + elemType := elem.Type() + for i := 0; i < elem.NumField(); i++ { + + fieldName := elemType.Field(i).Name + anonymous := elemType.Field(i).Anonymous + field := elem.Field(i) + + // TODO Use Tag for validation + // tag := typ.Field(i).Tag.Get("tagname") + kind := field.Kind() + + isPtr := false + if (kind == reflect.Ptr || kind == reflect.Array || kind == reflect.Slice || kind == reflect.Map || kind == reflect.Chan) && field.IsNil() { + continue + } + if kind == reflect.Ptr { + field = field.Elem() + kind = field.Kind() + isPtr = true + } + + var value string + //switch field.Interface().(type) { + switch kind { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + i := field.Int() + if i != 0 || isPtr { + value = strconv.FormatInt(i, 10) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + i := field.Uint() + if i != 0 || isPtr { + value = strconv.FormatUint(i, 10) + } + case reflect.Float32: + value = strconv.FormatFloat(field.Float(), 'f', 4, 32) + case reflect.Float64: + value = strconv.FormatFloat(field.Float(), 'f', 4, 64) + case reflect.Bool: + value = strconv.FormatBool(field.Bool()) + case reflect.String: + value = field.String() + case reflect.Map: + ifc := field.Interface() + m := ifc.(map[string]string) + if m != nil { + j := 0 + for k, v := range m { + j++ 
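+				// each map entry is flattened into indexed Key/Value query
+				// parameters (FieldName.1.Key / FieldName.1.Value, ...),
+				// mirroring the indexed-array convention used by FlattenFn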
+ keyName := fmt.Sprintf("%s.%d.Key", fieldName, j) + values.Set(keyName, k) + valueName := fmt.Sprintf("%s.%d.Value", fieldName, j) + values.Set(valueName, v) + } + } + case reflect.Slice: + if field.Type().Name() == "FlattenArray" { + FlattenFn(fieldName, field, values) + } else { + switch field.Type().Elem().Kind() { + case reflect.Uint8: + value = string(field.Bytes()) + case reflect.String: + l := field.Len() + if l > 0 { + strArray := make([]string, l) + for i := 0; i < l; i++ { + strArray[i] = field.Index(i).String() + } + bytes, err := json.Marshal(strArray) + if err == nil { + value = string(bytes) + } else { + log.Printf("Failed to convert JSON: %v", err) + } + } + default: + l := field.Len() + for j := 0; j < l; j++ { + prefixName := fmt.Sprintf("%s.%d.", fieldName, (j + 1)) + ifc := field.Index(j).Interface() + //log.Printf("%s : %v", prefixName, ifc) + if ifc != nil { + setQueryValuesByFlattenMethod(ifc, values, prefixName) + } + } + continue + } + } + + default: + switch field.Interface().(type) { + case ISO6801Time: + t := field.Interface().(ISO6801Time) + value = t.String() + case time.Time: + t := field.Interface().(time.Time) + value = GetISO8601TimeStamp(t) + default: + + ifc := field.Interface() + if ifc != nil { + if anonymous { + SetQueryValues(ifc, values) + } else { + prefixName := fieldName + "." + setQueryValuesByFlattenMethod(ifc, values, prefixName) + } + continue + } + } + } + if value != "" { + name := elemType.Field(i).Tag.Get("ArgName") + if name == "" { + name = fieldName + } + if prefix != "" { + name = prefix + name + } + // NOTE: here we will change name to underline style when the type is UnderlineString + if field.Type().Name() == "UnderlineString" { + name = Underline2Dot(name) + } + values.Set(name, value) + } + } +} diff --git a/vendor/github.com/denverdino/aliyungo/util/signature.go b/vendor/github.com/denverdino/aliyungo/util/signature.go index a00b27c1..1be6d39c 100644 --- a/vendor/github.com/denverdino/aliyungo/util/signature.go +++ b/vendor/github.com/denverdino/aliyungo/util/signature.go @@ -35,6 +35,5 @@ func CreateSignatureForRequest(method string, values *url.Values, accessKeySecre canonicalizedQueryString := percentReplace(values.Encode()) stringToSign := method + "&%2F&" + url.QueryEscape(canonicalizedQueryString) - return CreateSignature(stringToSign, accessKeySecret) } diff --git a/vendor/github.com/denverdino/aliyungo/util/util.go b/vendor/github.com/denverdino/aliyungo/util/util.go index dd68214e..ff49f2d8 100644 --- a/vendor/github.com/denverdino/aliyungo/util/util.go +++ b/vendor/github.com/denverdino/aliyungo/util/util.go @@ -9,6 +9,8 @@ import ( "net/url" "sort" "time" + "fmt" + "encoding/json" ) const dictionary = "_0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" @@ -145,3 +147,12 @@ func GenerateRandomECSPassword() string { return string(s) } + + +func PrettyJson(object interface{}) string { + b,err := json.MarshalIndent(object,"", " ") + if err != nil { + fmt.Printf("ERROR: PrettyJson, %v\n %s\n",err,b) + } + return string(b) +} \ No newline at end of file From b7839211af4fd038dbedcec1b06091ea02559a97 Mon Sep 17 00:00:00 2001 From: Li Yi Date: Tue, 25 Dec 2018 09:32:16 +0800 Subject: [PATCH 185/276] Update doc for BYOK support in OSS storage driver Change-Id: I195f29195f999e3e14ba921fd3435a6dc4788417 Signed-off-by: Li Yi --- docs/configuration.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/configuration.md b/docs/configuration.md index fe95d594..539c02a6 100644 --- 
a/docs/configuration.md +++ b/docs/configuration.md @@ -147,7 +147,8 @@ storage: endpoint: optional endpoints internal: optional internal endpoint bucket: OSS bucket - encrypt: optional data encryption setting + encrypt: optional enable server-side encryption + encryptionkeyid: optional KMS key id for encryption secure: optional ssl setting chunksize: optional size valye rootdirectory: optional root directory @@ -446,7 +447,8 @@ storage: endpoint: optional endpoints internal: optional internal endpoint bucket: OSS bucket - encrypt: optional data encryption setting + encrypt: optional enable server-side encryption + encryptionkeyid: optional KMS key id for encryption secure: optional ssl setting chunksize: optional size valye rootdirectory: optional root directory From eb1a2cd911f28b21375914420d5a464f45d4bd02 Mon Sep 17 00:00:00 2001 From: David Wu Date: Fri, 4 Jan 2019 11:02:57 -0800 Subject: [PATCH 186/276] default autoredirect to false Signed-off-by: David Wu --- registry/auth/token/accesscontroller.go | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/registry/auth/token/accesscontroller.go b/registry/auth/token/accesscontroller.go index 33d18a48..fa924f0b 100644 --- a/registry/auth/token/accesscontroller.go +++ b/registry/auth/token/accesscontroller.go @@ -162,11 +162,14 @@ func checkOptions(options map[string]interface{}) (tokenAccessOptions, error) { opts.realm, opts.issuer, opts.service, opts.rootCertBundle = vals[0], vals[1], vals[2], vals[3] - autoRedirect, ok := options["autoredirect"].(bool) - if !ok { - return opts, fmt.Errorf("token auth requires a valid option bool: autoredirect") + autoRedirectVal, ok := options["autoredirect"] + if ok { + autoRedirect, ok := autoRedirectVal.(bool) + if !ok { + return opts, fmt.Errorf("token auth requires a valid option bool: autoredirect") + } + opts.autoRedirect = autoRedirect } - opts.autoRedirect = autoRedirect return opts, nil } From 0ac367fd6bee057d404c405a298b4b7aedf301ec Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Mon, 10 Dec 2018 23:51:58 +0100 Subject: [PATCH 187/276] Add reference.ParseDockerRef utility function ParseDockerRef normalizes the image reference following the docker convention. This is added mainly for backward compatibility. The reference returned can only be either tagged or digested. For reference contains both tag and digest, the function returns digested reference, e.g. docker.io/library/busybox:latest@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa. Signed-off-by: Sebastiaan van Stijn --- reference/normalize.go | 29 ++++++++++++++ reference/normalize_test.go | 80 +++++++++++++++++++++++++++++++++++++ 2 files changed, 109 insertions(+) diff --git a/reference/normalize.go b/reference/normalize.go index 2d71fc5e..b3dfb7a6 100644 --- a/reference/normalize.go +++ b/reference/normalize.go @@ -56,6 +56,35 @@ func ParseNormalizedNamed(s string) (Named, error) { return named, nil } +// ParseDockerRef normalizes the image reference following the docker convention. This is added +// mainly for backward compatibility. +// The reference returned can only be either tagged or digested. For reference contains both tag +// and digest, the function returns digested reference, e.g. 
docker.io/library/busybox:latest@ +// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as +// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa. +func ParseDockerRef(ref string) (Named, error) { + named, err := ParseNormalizedNamed(ref) + if err != nil { + return nil, err + } + if _, ok := named.(NamedTagged); ok { + if canonical, ok := named.(Canonical); ok { + // The reference is both tagged and digested, only + // return digested. + newNamed, err := WithName(canonical.Name()) + if err != nil { + return nil, err + } + newCanonical, err := WithDigest(newNamed, canonical.Digest()) + if err != nil { + return nil, err + } + return newCanonical, nil + } + } + return TagNameOnly(named), nil +} + // splitDockerDomain splits a repository name to domain and remotename string. // If no valid domain is found, the default domain is used. Repository name // needs to be already validated before. diff --git a/reference/normalize_test.go b/reference/normalize_test.go index a881972a..a636236e 100644 --- a/reference/normalize_test.go +++ b/reference/normalize_test.go @@ -623,3 +623,83 @@ func TestMatch(t *testing.T) { } } } + +func TestParseDockerRef(t *testing.T) { + testcases := []struct { + name string + input string + expected string + }{ + { + name: "nothing", + input: "busybox", + expected: "docker.io/library/busybox:latest", + }, + { + name: "tag only", + input: "busybox:latest", + expected: "docker.io/library/busybox:latest", + }, + { + name: "digest only", + input: "busybox@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582", + expected: "docker.io/library/busybox@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582", + }, + { + name: "path only", + input: "library/busybox", + expected: "docker.io/library/busybox:latest", + }, + { + name: "hostname only", + input: "docker.io/busybox", + expected: "docker.io/library/busybox:latest", + }, + { + name: "no tag", + input: "docker.io/library/busybox", + expected: "docker.io/library/busybox:latest", + }, + { + name: "no path", + input: "docker.io/busybox:latest", + expected: "docker.io/library/busybox:latest", + }, + { + name: "no hostname", + input: "library/busybox:latest", + expected: "docker.io/library/busybox:latest", + }, + { + name: "full reference with tag", + input: "docker.io/library/busybox:latest", + expected: "docker.io/library/busybox:latest", + }, + { + name: "gcr reference without tag", + input: "gcr.io/library/busybox", + expected: "gcr.io/library/busybox:latest", + }, + { + name: "both tag and digest", + input: "gcr.io/library/busybox:latest@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582", + expected: "gcr.io/library/busybox@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582", + }, + } + for _, test := range testcases { + t.Run(test.name, func(t *testing.T) { + normalized, err := ParseDockerRef(test.input) + if err != nil { + t.Fatal(err) + } + output := normalized.String() + if output != test.expected { + t.Fatalf("expected %q to be parsed as %v, got %v", test.input, test.expected, output) + } + _, err = Parse(output) + if err != nil { + t.Fatalf("%q should be a valid reference, but got an error: %v", output, err) + } + }) + } +} From d1abdeb62361860bed0f19d10a92ad7107543b5c Mon Sep 17 00:00:00 2001 From: Ryan Abrams Date: Fri, 4 Jan 2019 12:59:42 -0800 Subject: [PATCH 188/276] Add docs for autoredirect config parameter Config parameter is user facing so should be 
documented. Signed-off-by: Ryan Abrams --- docs/configuration.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/configuration.md b/docs/configuration.md index fe95d594..5b506758 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -171,6 +171,7 @@ auth: realm: silly-realm service: silly-service token: + autoredirect: true realm: token-realm service: token-service issuer: registry-token-issuer @@ -623,6 +624,7 @@ security. | `service` | yes | The service being authenticated. | | `issuer` | yes | The name of the token issuer. The issuer inserts this into the token so it must match the value configured for the issuer. | | `rootcertbundle` | yes | The absolute path to the root certificate bundle. This bundle contains the public part of the certificates used to sign authentication tokens. | +| `autoredirect` | no | When set to `true`, `realm` will automatically be set using the Host header of the request as the domain and a path of `/auth/token/`| For more information about Token based authentication configuration, see the From 7df881dcbe5f220d9080dc98ff9cfd258932edf6 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Thu, 10 Jan 2019 03:47:21 +0000 Subject: [PATCH 189/276] change default Dockerfile to install ssl utils Signed-off-by: andyzhangx --- Dockerfile | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 612e62ce..ec09350e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -14,7 +14,11 @@ WORKDIR $DISTRIBUTION_DIR COPY . $DISTRIBUTION_DIR RUN CGO_ENABLED=0 make PREFIX=/go clean binaries && file ./bin/registry | grep "statically linked" -FROM alpine +FROM alpine:3.8 + +RUN set -ex \ + && apk add --no-cache ca-certificates apache2-utils + COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml COPY --from=build /go/src/github.com/docker/distribution/bin/registry /bin/registry VOLUME ["/var/lib/registry"] From cdb62b2b77775db037532daad2a9678d8fe5877c Mon Sep 17 00:00:00 2001 From: Greg Rebholz Date: Tue, 8 Jan 2019 21:29:40 -0500 Subject: [PATCH 190/276] Registry - make minimum TLS version user configurable Signed-off-by: J. Gregory Rebholz --- configuration/configuration.go | 3 +++ configuration/configuration_test.go | 2 ++ docs/configuration.md | 6 ++++-- registry/registry.go | 18 +++++++++++++++++- 4 files changed, 26 insertions(+), 3 deletions(-) diff --git a/configuration/configuration.go b/configuration/configuration.go index b347d63b..690b8ae3 100644 --- a/configuration/configuration.go +++ b/configuration/configuration.go @@ -108,6 +108,9 @@ type Configuration struct { // A file may contain multiple CA certificates encoded as PEM ClientCAs []string `yaml:"clientcas,omitempty"` + // Specifies the lowest TLS version allowed + MinimumTLS string `yaml:"minimumtls,omitempty"` + // LetsEncrypt is used to configuration setting up TLS through // Let's Encrypt instead of manually specifying certificate and // key. 
If a TLS certificate is specified, the Let's Encrypt diff --git a/configuration/configuration_test.go b/configuration/configuration_test.go index e5f71486..d57fbf4f 100644 --- a/configuration/configuration_test.go +++ b/configuration/configuration_test.go @@ -83,6 +83,7 @@ var configStruct = Configuration{ Certificate string `yaml:"certificate,omitempty"` Key string `yaml:"key,omitempty"` ClientCAs []string `yaml:"clientcas,omitempty"` + MinimumTLS string `yaml:"minimumtls,omitempty"` LetsEncrypt struct { CacheFile string `yaml:"cachefile,omitempty"` Email string `yaml:"email,omitempty"` @@ -105,6 +106,7 @@ var configStruct = Configuration{ Certificate string `yaml:"certificate,omitempty"` Key string `yaml:"key,omitempty"` ClientCAs []string `yaml:"clientcas,omitempty"` + MinimumTLS string `yaml:"minimumtls,omitempty"` LetsEncrypt struct { CacheFile string `yaml:"cachefile,omitempty"` Email string `yaml:"email,omitempty"` diff --git a/docs/configuration.md b/docs/configuration.md index 5b506758..f9e368f9 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -777,6 +777,7 @@ http: clientcas: - /path/to/ca.pem - /path/to/another/ca.pem + minimumtls: tls1.0 letsencrypt: cachefile: /path/to/cache-file email: emailused@letsencrypt.com @@ -813,8 +814,9 @@ and proxy connections to the registry server. | Parameter | Required | Description | |-----------|----------|-------------------------------------------------------| | `certificate` | yes | Absolute path to the x509 certificate file. | -| `key` | yes | Absolute path to the x509 private key file. | -| `clientcas` | no | An array of absolute paths to x509 CA files. | +| `key` | yes | Absolute path to the x509 private key file. | +| `clientcas` | no | An array of absolute paths to x509 CA files. | +| `minimumtls` | no | Minimum TLS version allowed (tls1.0, tls1.1, tls1.2). Defaults to tls1.0 | ### `letsencrypt` diff --git a/registry/registry.go b/registry/registry.go index 18698f5b..fd50b46a 100644 --- a/registry/registry.go +++ b/registry/registry.go @@ -135,10 +135,26 @@ func (registry *Registry) ListenAndServe() error { } if config.HTTP.TLS.Certificate != "" || config.HTTP.TLS.LetsEncrypt.CacheFile != "" { + var tlsMinVersion uint16 + if config.HTTP.TLS.MinimumTLS == "" { + tlsMinVersion = tls.VersionTLS10 + } else { + switch config.HTTP.TLS.MinimumTLS { + case "tls1.0": + tlsMinVersion = tls.VersionTLS10 + case "tls1.1": + tlsMinVersion = tls.VersionTLS11 + case "tls1.2": + tlsMinVersion = tls.VersionTLS12 + default: + return fmt.Errorf("unknown minimum TLS level '%s' specified for http.tls.minimumtls", config.HTTP.TLS.MinimumTLS) + } + dcontext.GetLogger(registry.app).Infof("restricting TLS to %s or higher", config.HTTP.TLS.MinimumTLS) + } tlsConf := &tls.Config{ ClientAuth: tls.NoClientCert, NextProtos: nextProtos(config), - MinVersion: tls.VersionTLS10, + MinVersion: tlsMinVersion, PreferServerCipherSuites: true, CipherSuites: []uint16{ tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, From 15b0204758876befc0664b64ccfb05f924627b2d Mon Sep 17 00:00:00 2001 From: Luca Bruno Date: Mon, 14 Jan 2019 08:53:03 +0000 Subject: [PATCH 191/276] registry: fix binary JSON content-type This fixes registry endpoints to return the proper `application/json` content-type for JSON content, also updating spec examples for that. As per IETF specification and IANA registry [0], the `application/json` type is a binary media, so the content-type label does not need any text-charset selector. 
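For illustration, here is a minimal standalone sketch (a hypothetical `writeJSON` helper, not code from this patch) of a handler emitting the corrected header; RFC 8259 requires JSON interchange to use UTF-8, so a `charset` parameter adds nothing:

```go
package jsonheader

import (
	"encoding/json"
	"net/http"
)

// writeJSON serializes v and sends it with the bare media type. The IANA
// registration for application/json defines no parameters, so values such
// as "application/json; charset=utf-8" are non-compliant labels.
func writeJSON(w http.ResponseWriter, status int, v interface{}) error {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(status)
	return json.NewEncoder(w).Encode(v)
}
```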
Additionally, the media type definition explicitly states that it has no required nor optional parameters, which makes the current registry headers non-compliant. [0]: https://www.iana.org/assignments/media-types/application/json Signed-off-by: Luca Bruno --- contrib/compose/README.md | 2 +- docs/spec/api.md | 182 ++++++++++++++--------------- docs/spec/auth/token.md | 2 +- health/health.go | 2 +- registry/api/errcode/handler.go | 2 +- registry/api/v2/descriptors.go | 58 ++++----- registry/client/repository_test.go | 4 +- registry/handlers/api_test.go | 8 +- registry/handlers/app.go | 2 +- registry/handlers/app_test.go | 4 +- registry/handlers/catalog.go | 2 +- registry/handlers/tags.go | 2 +- 12 files changed, 135 insertions(+), 135 deletions(-) diff --git a/contrib/compose/README.md b/contrib/compose/README.md index cb2a81f8..01617860 100644 --- a/contrib/compose/README.md +++ b/contrib/compose/README.md @@ -133,7 +133,7 @@ to the 1.0 registry. Requests from newer clients will route to the 2.0 registry. > Accept: */* > < HTTP/1.1 200 OK - < Content-Type: application/json; charset=utf-8 + < Content-Type: application/json < Docker-Distribution-Api-Version: registry/2.0 < Date: Tue, 14 Apr 2015 22:34:13 GMT < Content-Length: 39 diff --git a/docs/spec/api.md b/docs/spec/api.md index 0a4009c8..64b9891e 100644 --- a/docs/spec/api.md +++ b/docs/spec/api.md @@ -1206,7 +1206,7 @@ The registry does not implement the V2 API. 401 Unauthorized WWW-Authenticate: realm="", ..." Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -1244,7 +1244,7 @@ The error codes that may be included in the response body are enumerated below: ``` 429 Too Many Requests Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -1316,7 +1316,7 @@ The following parameters should be specified on the request: ``` 200 OK Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "name": , @@ -1344,7 +1344,7 @@ The following headers will be returned with the response: 401 Unauthorized WWW-Authenticate: realm="", ..." Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -1382,7 +1382,7 @@ The error codes that may be included in the response body are enumerated below: ``` 404 Not Found Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -1419,7 +1419,7 @@ The error codes that may be included in the response body are enumerated below: ``` 403 Forbidden Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -1456,7 +1456,7 @@ The error codes that may be included in the response body are enumerated below: ``` 429 Too Many Requests Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -1514,7 +1514,7 @@ The following parameters should be specified on the request: 200 OK Content-Length: Link: <?n=&last=>; rel="next" -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "name": , @@ -1543,7 +1543,7 @@ The following headers will be returned with the response: 401 Unauthorized WWW-Authenticate: realm="", ..." 
Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -1581,7 +1581,7 @@ The error codes that may be included in the response body are enumerated below: ``` 404 Not Found Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -1618,7 +1618,7 @@ The error codes that may be included in the response body are enumerated below: ``` 403 Forbidden Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -1655,7 +1655,7 @@ The error codes that may be included in the response body are enumerated below: ``` 429 Too Many Requests Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -1759,7 +1759,7 @@ The following headers will be returned with the response: ``` 400 Bad Request -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -1792,7 +1792,7 @@ The error codes that may be included in the response body are enumerated below: 401 Unauthorized WWW-Authenticate: realm="", ..." Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -1830,7 +1830,7 @@ The error codes that may be included in the response body are enumerated below: ``` 404 Not Found Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -1867,7 +1867,7 @@ The error codes that may be included in the response body are enumerated below: ``` 403 Forbidden Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -1904,7 +1904,7 @@ The error codes that may be included in the response body are enumerated below: ``` 429 Too Many Requests Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -2005,7 +2005,7 @@ The following headers will be returned with the response: ``` 400 Bad Request -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -2041,7 +2041,7 @@ The error codes that may be included in the response body are enumerated below: 401 Unauthorized WWW-Authenticate: realm="", ..." 
Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -2079,7 +2079,7 @@ The error codes that may be included in the response body are enumerated below: ``` 404 Not Found Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -2116,7 +2116,7 @@ The error codes that may be included in the response body are enumerated below: ``` 403 Forbidden Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -2153,7 +2153,7 @@ The error codes that may be included in the response body are enumerated below: ``` 429 Too Many Requests Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -2189,7 +2189,7 @@ The error codes that may be included in the response body are enumerated below: ``` 400 Bad Request -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [{ @@ -2277,7 +2277,7 @@ The following parameters should be specified on the request: ``` 400 Bad Request -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -2310,7 +2310,7 @@ The error codes that may be included in the response body are enumerated below: 401 Unauthorized WWW-Authenticate: realm="", ..." Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -2348,7 +2348,7 @@ The error codes that may be included in the response body are enumerated below: ``` 404 Not Found Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -2385,7 +2385,7 @@ The error codes that may be included in the response body are enumerated below: ``` 403 Forbidden Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -2422,7 +2422,7 @@ The error codes that may be included in the response body are enumerated below: ``` 429 Too Many Requests Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -2458,7 +2458,7 @@ The error codes that may be included in the response body are enumerated below: ``` 404 Not Found -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -2583,7 +2583,7 @@ The following headers will be returned with the response: ``` 400 Bad Request -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -2614,7 +2614,7 @@ The error codes that may be included in the response body are enumerated below: ``` 404 Not Found -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -2647,7 +2647,7 @@ The error codes that may be included in the response body are enumerated below: 401 Unauthorized WWW-Authenticate: realm="", ..." 
Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -2685,7 +2685,7 @@ The error codes that may be included in the response body are enumerated below: ``` 404 Not Found Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -2722,7 +2722,7 @@ The error codes that may be included in the response body are enumerated below: ``` 403 Forbidden Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -2759,7 +2759,7 @@ The error codes that may be included in the response body are enumerated below: ``` 429 Too Many Requests Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -2843,7 +2843,7 @@ The following headers will be returned with the response: ``` 400 Bad Request -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -2874,7 +2874,7 @@ The error codes that may be included in the response body are enumerated below: ``` 404 Not Found -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -2917,7 +2917,7 @@ The range specification cannot be satisfied for the requested content. This can 401 Unauthorized WWW-Authenticate: realm="", ..." Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -2955,7 +2955,7 @@ The error codes that may be included in the response body are enumerated below: ``` 404 Not Found Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -2992,7 +2992,7 @@ The error codes that may be included in the response body are enumerated below: ``` 403 Forbidden Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -3029,7 +3029,7 @@ The error codes that may be included in the response body are enumerated below: ``` 429 Too Many Requests Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -3132,7 +3132,7 @@ The error codes that may be included in the response body are enumerated below: ``` 404 Not Found -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -3163,7 +3163,7 @@ The error codes that may be included in the response body are enumerated below: ``` 405 Method Not Allowed -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -3195,7 +3195,7 @@ The error codes that may be included in the response body are enumerated below: 401 Unauthorized WWW-Authenticate: realm="", ..." 
Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -3233,7 +3233,7 @@ The error codes that may be included in the response body are enumerated below: ``` 404 Not Found Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -3270,7 +3270,7 @@ The error codes that may be included in the response body are enumerated below: ``` 403 Forbidden Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -3307,7 +3307,7 @@ The error codes that may be included in the response body are enumerated below: ``` 429 Too Many Requests Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -3445,7 +3445,7 @@ The error codes that may be included in the response body are enumerated below: 401 Unauthorized WWW-Authenticate: realm="", ..." Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -3483,7 +3483,7 @@ The error codes that may be included in the response body are enumerated below: ``` 404 Not Found Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -3520,7 +3520,7 @@ The error codes that may be included in the response body are enumerated below: ``` 403 Forbidden Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -3557,7 +3557,7 @@ The error codes that may be included in the response body are enumerated below: ``` 429 Too Many Requests Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -3662,7 +3662,7 @@ The error codes that may be included in the response body are enumerated below: 401 Unauthorized WWW-Authenticate: realm="", ..." Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -3700,7 +3700,7 @@ The error codes that may be included in the response body are enumerated below: ``` 404 Not Found Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -3737,7 +3737,7 @@ The error codes that may be included in the response body are enumerated below: ``` 403 Forbidden Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -3774,7 +3774,7 @@ The error codes that may be included in the response body are enumerated below: ``` 429 Too Many Requests Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -3897,7 +3897,7 @@ The error codes that may be included in the response body are enumerated below: 401 Unauthorized WWW-Authenticate: realm="", ..." 
Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -3935,7 +3935,7 @@ The error codes that may be included in the response body are enumerated below: ``` 404 Not Found Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -3972,7 +3972,7 @@ The error codes that may be included in the response body are enumerated below: ``` 403 Forbidden Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -4009,7 +4009,7 @@ The error codes that may be included in the response body are enumerated below: ``` 429 Too Many Requests Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -4102,7 +4102,7 @@ The following headers will be returned with the response: ``` 400 Bad Request -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -4134,7 +4134,7 @@ The error codes that may be included in the response body are enumerated below: ``` 404 Not Found -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -4166,7 +4166,7 @@ The error codes that may be included in the response body are enumerated below: 401 Unauthorized WWW-Authenticate: realm="", ..." Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -4204,7 +4204,7 @@ The error codes that may be included in the response body are enumerated below: ``` 404 Not Found Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -4241,7 +4241,7 @@ The error codes that may be included in the response body are enumerated below: ``` 403 Forbidden Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -4278,7 +4278,7 @@ The error codes that may be included in the response body are enumerated below: ``` 429 Too Many Requests Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -4370,7 +4370,7 @@ The following headers will be returned with the response: ``` 400 Bad Request -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -4402,7 +4402,7 @@ The error codes that may be included in the response body are enumerated below: ``` 404 Not Found -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -4434,7 +4434,7 @@ The error codes that may be included in the response body are enumerated below: 401 Unauthorized WWW-Authenticate: realm="", ..." 
Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -4472,7 +4472,7 @@ The error codes that may be included in the response body are enumerated below: ``` 404 Not Found Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -4509,7 +4509,7 @@ The error codes that may be included in the response body are enumerated below: ``` 403 Forbidden Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -4546,7 +4546,7 @@ The error codes that may be included in the response body are enumerated below: ``` 429 Too Many Requests Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -4636,7 +4636,7 @@ The following headers will be returned with the response: ``` 400 Bad Request -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -4668,7 +4668,7 @@ The error codes that may be included in the response body are enumerated below: ``` 404 Not Found -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -4710,7 +4710,7 @@ The `Content-Range` specification cannot be accepted, either because it does not 401 Unauthorized WWW-Authenticate: realm="", ..." Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -4748,7 +4748,7 @@ The error codes that may be included in the response body are enumerated below: ``` 404 Not Found Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -4785,7 +4785,7 @@ The error codes that may be included in the response body are enumerated below: ``` 403 Forbidden Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -4822,7 +4822,7 @@ The error codes that may be included in the response body are enumerated below: ``` 429 Too Many Requests Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -4916,7 +4916,7 @@ The following headers will be returned with the response: ``` 400 Bad Request -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -4949,7 +4949,7 @@ The error codes that may be included in the response body are enumerated below: ``` 404 Not Found -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -4981,7 +4981,7 @@ The error codes that may be included in the response body are enumerated below: 401 Unauthorized WWW-Authenticate: realm="", ..." 
Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -5019,7 +5019,7 @@ The error codes that may be included in the response body are enumerated below: ``` 404 Not Found Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -5056,7 +5056,7 @@ The error codes that may be included in the response body are enumerated below: ``` 403 Forbidden Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -5093,7 +5093,7 @@ The error codes that may be included in the response body are enumerated below: ``` 429 Too Many Requests Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -5177,7 +5177,7 @@ The following headers will be returned with the response: ``` 400 Bad Request -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -5208,7 +5208,7 @@ The error codes that may be included in the response body are enumerated below: ``` 404 Not Found -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -5240,7 +5240,7 @@ The error codes that may be included in the response body are enumerated below: 401 Unauthorized WWW-Authenticate: realm="", ..." Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -5278,7 +5278,7 @@ The error codes that may be included in the response body are enumerated below: ``` 404 Not Found Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -5315,7 +5315,7 @@ The error codes that may be included in the response body are enumerated below: ``` 403 Forbidden Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -5352,7 +5352,7 @@ The error codes that may be included in the response body are enumerated below: ``` 429 Too Many Requests Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "errors:" [ @@ -5414,7 +5414,7 @@ Request an unabridged list of repositories available. 
The implementation may im ``` 200 OK Content-Length: -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "repositories": [ @@ -5459,7 +5459,7 @@ The following parameters should be specified on the request: 200 OK Content-Length: Link: <?n=&last=>; rel="next" -Content-Type: application/json; charset=utf-8 +Content-Type: application/json { "repositories": [ diff --git a/docs/spec/auth/token.md b/docs/spec/auth/token.md index 97f1971d..2c1048ba 100644 --- a/docs/spec/auth/token.md +++ b/docs/spec/auth/token.md @@ -60,7 +60,7 @@ return this response: ``` HTTP/1.1 401 Unauthorized -Content-Type: application/json; charset=utf-8 +Content-Type: application/json Docker-Distribution-Api-Version: registry/2.0 Www-Authenticate: Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:samalba/my-app:pull,push" Date: Thu, 10 Sep 2015 19:32:31 GMT diff --git a/health/health.go b/health/health.go index 592bce57..93f8d6d1 100644 --- a/health/health.go +++ b/health/health.go @@ -291,7 +291,7 @@ func statusResponse(w http.ResponseWriter, r *http.Request, status int, checks m } } - w.Header().Set("Content-Type", "application/json; charset=utf-8") + w.Header().Set("Content-Type", "application/json") w.Header().Set("Content-Length", fmt.Sprint(len(p))) w.WriteHeader(status) if _, err := w.Write(p); err != nil { diff --git a/registry/api/errcode/handler.go b/registry/api/errcode/handler.go index d77e7047..ebb9ce92 100644 --- a/registry/api/errcode/handler.go +++ b/registry/api/errcode/handler.go @@ -9,7 +9,7 @@ import ( // and sets the content-type header to 'application/json'. It will handle // ErrorCoder and Errors, and if necessary will create an envelope. func ServeJSON(w http.ResponseWriter, err error) error { - w.Header().Set("Content-Type", "application/json; charset=utf-8") + w.Header().Set("Content-Type", "application/json") var sc int switch errs := err.(type) { diff --git a/registry/api/v2/descriptors.go b/registry/api/v2/descriptors.go index a9616c58..cffacc3c 100644 --- a/registry/api/v2/descriptors.go +++ b/registry/api/v2/descriptors.go @@ -126,7 +126,7 @@ var ( }, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ @@ -147,7 +147,7 @@ var ( }, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ @@ -168,7 +168,7 @@ var ( }, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ @@ -189,7 +189,7 @@ var ( }, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ @@ -441,7 +441,7 @@ var routeDescriptors = []RouteDescriptor{ }, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: `{ "name": , "tags": [ @@ -478,7 +478,7 @@ var routeDescriptors = []RouteDescriptor{ linkHeader, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: `{ "name": , "tags": [ @@ -541,7 +541,7 @@ var routeDescriptors = []RouteDescriptor{ ErrorCodeTagInvalid, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, }, @@ 
-592,7 +592,7 @@ var routeDescriptors = []RouteDescriptor{ Description: "The received manifest was invalid in some way, as described by the error codes. The client should resolve the issue and retry the request.", StatusCode: http.StatusBadRequest, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ @@ -615,7 +615,7 @@ var routeDescriptors = []RouteDescriptor{ ErrorCodeBlobUnknown, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: `{ "errors:" [{ "code": "BLOB_UNKNOWN", @@ -669,7 +669,7 @@ var routeDescriptors = []RouteDescriptor{ ErrorCodeTagInvalid, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, }, @@ -686,7 +686,7 @@ var routeDescriptors = []RouteDescriptor{ ErrorCodeManifestUnknown, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, }, @@ -766,7 +766,7 @@ var routeDescriptors = []RouteDescriptor{ ErrorCodeDigestInvalid, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, }, @@ -774,7 +774,7 @@ var routeDescriptors = []RouteDescriptor{ Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", StatusCode: http.StatusNotFound, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ @@ -838,7 +838,7 @@ var routeDescriptors = []RouteDescriptor{ ErrorCodeDigestInvalid, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, }, @@ -849,7 +849,7 @@ var routeDescriptors = []RouteDescriptor{ ErrorCodeBlobUnknown, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, }, @@ -905,7 +905,7 @@ var routeDescriptors = []RouteDescriptor{ Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", StatusCode: http.StatusNotFound, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ @@ -917,7 +917,7 @@ var routeDescriptors = []RouteDescriptor{ Description: "Blob delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled", StatusCode: http.StatusMethodNotAllowed, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ @@ -1179,7 +1179,7 @@ var routeDescriptors = []RouteDescriptor{ ErrorCodeBlobUploadInvalid, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, }, @@ -1190,7 +1190,7 @@ var routeDescriptors = []RouteDescriptor{ ErrorCodeBlobUploadUnknown, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, }, @@ -1254,7 +1254,7 @@ var routeDescriptors = []RouteDescriptor{ ErrorCodeBlobUploadInvalid, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, }, @@ -1265,7 
+1265,7 @@ var routeDescriptors = []RouteDescriptor{ ErrorCodeBlobUploadUnknown, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, }, @@ -1336,7 +1336,7 @@ var routeDescriptors = []RouteDescriptor{ ErrorCodeBlobUploadInvalid, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, }, @@ -1347,7 +1347,7 @@ var routeDescriptors = []RouteDescriptor{ ErrorCodeBlobUploadUnknown, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, }, @@ -1431,7 +1431,7 @@ var routeDescriptors = []RouteDescriptor{ errcode.ErrorCodeUnsupported, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, }, @@ -1442,7 +1442,7 @@ var routeDescriptors = []RouteDescriptor{ ErrorCodeBlobUploadUnknown, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, }, @@ -1488,7 +1488,7 @@ var routeDescriptors = []RouteDescriptor{ ErrorCodeBlobUploadInvalid, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, }, @@ -1499,7 +1499,7 @@ var routeDescriptors = []RouteDescriptor{ ErrorCodeBlobUploadUnknown, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, }, @@ -1539,7 +1539,7 @@ var routeDescriptors = []RouteDescriptor{ }, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: `{ "repositories": [ , @@ -1558,7 +1558,7 @@ var routeDescriptors = []RouteDescriptor{ { StatusCode: http.StatusOK, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: `{ "repositories": [ , diff --git a/registry/client/repository_test.go b/registry/client/repository_test.go index 88e7e4b5..35d80275 100644 --- a/registry/client/repository_test.go +++ b/registry/client/repository_test.go @@ -80,7 +80,7 @@ func addTestFetch(repo string, dgst digest.Digest, content []byte, m *testutil.R func addTestCatalog(route string, content []byte, link string, m *testutil.RequestResponseMap) { headers := map[string][]string{ "Content-Length": {strconv.Itoa(len(content))}, - "Content-Type": {"application/json; charset=utf-8"}, + "Content-Type": {"application/json"}, } if link != "" { headers["Link"] = append(headers["Link"], link) @@ -1062,7 +1062,7 @@ func TestObtainsErrorForMissingTag(t *testing.T) { StatusCode: http.StatusNotFound, Body: errBytes, Headers: http.Header(map[string][]string{ - "Content-Type": {"application/json; charset=utf-8"}, + "Content-Type": {"application/json"}, }), }, }) diff --git a/registry/handlers/api_test.go b/registry/handlers/api_test.go index 2544efba..08f52768 100644 --- a/registry/handlers/api_test.go +++ b/registry/handlers/api_test.go @@ -65,7 +65,7 @@ func TestCheckAPI(t *testing.T) { checkResponse(t, "issuing api base check", resp, http.StatusOK) checkHeaders(t, resp, http.Header{ - "Content-Type": []string{"application/json; charset=utf-8"}, + "Content-Type": []string{"application/json"}, "Content-Length": []string{"2"}, }) @@ -259,7 +259,7 @@ func TestURLPrefix(t *testing.T) { checkResponse(t, "issuing api base check", resp, http.StatusOK) checkHeaders(t, resp, 
http.Header{ - "Content-Type": []string{"application/json; charset=utf-8"}, + "Content-Type": []string{"application/json"}, "Content-Length": []string{"2"}, }) } @@ -1180,7 +1180,7 @@ func testManifestAPISchema1(t *testing.T, env *testEnv, imageName reference.Name // charset. resp = putManifest(t, "re-putting signed manifest", manifestDigestURL, schema1.MediaTypeSignedManifest, sm2) checkResponse(t, "re-putting signed manifest", resp, http.StatusCreated) - resp = putManifest(t, "re-putting signed manifest", manifestDigestURL, "application/json; charset=utf-8", sm2) + resp = putManifest(t, "re-putting signed manifest", manifestDigestURL, "application/json", sm2) checkResponse(t, "re-putting signed manifest", resp, http.StatusCreated) resp = putManifest(t, "re-putting signed manifest", manifestDigestURL, "application/json", sm2) checkResponse(t, "re-putting signed manifest", resp, http.StatusCreated) @@ -2329,7 +2329,7 @@ func checkBodyHasErrorCodes(t *testing.T, msg string, resp *http.Response, error // TODO(stevvooe): Shoot. The error setup is not working out. The content- // type headers are being set after writing the status code. - // if resp.Header.Get("Content-Type") != "application/json; charset=utf-8" { + // if resp.Header.Get("Content-Type") != "application/json" { // t.Fatalf("unexpected content type: %v != 'application/json'", // resp.Header.Get("Content-Type")) // } diff --git a/registry/handlers/app.go b/registry/handlers/app.go index 978851bb..3807aed8 100644 --- a/registry/handlers/app.go +++ b/registry/handlers/app.go @@ -898,7 +898,7 @@ func (app *App) nameRequired(r *http.Request) bool { func apiBase(w http.ResponseWriter, r *http.Request) { const emptyJSON = "{}" // Provide a simple /v2/ 200 OK response with empty json response. - w.Header().Set("Content-Type", "application/json; charset=utf-8") + w.Header().Set("Content-Type", "application/json") w.Header().Set("Content-Length", fmt.Sprint(len(emptyJSON))) fmt.Fprint(w, emptyJSON) diff --git a/registry/handlers/app_test.go b/registry/handlers/app_test.go index 12c0b61c..f95b441f 100644 --- a/registry/handlers/app_test.go +++ b/registry/handlers/app_test.go @@ -188,8 +188,8 @@ func TestNewApp(t *testing.T) { t.Fatalf("unexpected status code during request: %v", err) } - if req.Header.Get("Content-Type") != "application/json; charset=utf-8" { - t.Fatalf("unexpected content-type: %v != %v", req.Header.Get("Content-Type"), "application/json; charset=utf-8") + if req.Header.Get("Content-Type") != "application/json" { + t.Fatalf("unexpected content-type: %v != %v", req.Header.Get("Content-Type"), "application/json") } expectedAuthHeader := "Bearer realm=\"realm-test\",service=\"service-test\"" diff --git a/registry/handlers/catalog.go b/registry/handlers/catalog.go index eca98468..15fb3951 100644 --- a/registry/handlers/catalog.go +++ b/registry/handlers/catalog.go @@ -55,7 +55,7 @@ func (ch *catalogHandler) GetCatalog(w http.ResponseWriter, r *http.Request) { return } - w.Header().Set("Content-Type", "application/json; charset=utf-8") + w.Header().Set("Content-Type", "application/json") // Add a link header if there are more entries to retrieve if moreEntries { diff --git a/registry/handlers/tags.go b/registry/handlers/tags.go index 91f1031e..ea6ecc4a 100644 --- a/registry/handlers/tags.go +++ b/registry/handlers/tags.go @@ -49,7 +49,7 @@ func (th *tagsHandler) GetTags(w http.ResponseWriter, r *http.Request) { return } - w.Header().Set("Content-Type", "application/json; charset=utf-8") + w.Header().Set("Content-Type", 
"application/json") enc := json.NewEncoder(w) if err := enc.Encode(tagsAPIResponse{ From da8db4666b08431e701b044c9077648533d97b7e Mon Sep 17 00:00:00 2001 From: Manish Tomar Date: Mon, 4 Feb 2019 16:01:04 -0800 Subject: [PATCH 192/276] Fix gometalint errors Signed-off-by: Manish Tomar --- configuration/configuration.go | 8 ++--- configuration/configuration_test.go | 4 +-- configuration/parser.go | 2 +- health/api/api.go | 2 +- manifest/manifestlist/manifestlist.go | 4 +-- manifest/ocischema/builder.go | 2 +- manifest/ocischema/manifest.go | 2 +- manifest/schema1/manifest.go | 4 +-- manifest/schema1/reference_builder.go | 2 +- manifest/schema2/manifest.go | 2 +- manifests.go | 2 +- notifications/bridge.go | 9 ----- notifications/event_test.go | 9 ++--- notifications/http.go | 3 +- notifications/listener_test.go | 3 +- notifications/sinks.go | 5 --- reference/reference.go | 2 +- registry/api/errcode/errors.go | 6 ++-- registry/api/v2/urls.go | 12 ------- registry/api/v2/urls_test.go | 5 --- .../client/auth/challenge/authchallenge.go | 4 +-- registry/client/repository_test.go | 2 +- registry/handlers/api_test.go | 10 ++---- registry/handlers/app.go | 6 ++-- registry/handlers/blobupload.go | 2 +- registry/handlers/hooks.go | 4 +-- registry/handlers/mail.go | 2 +- registry/proxy/proxyblobstore.go | 4 --- registry/proxy/proxyblobstore_test.go | 12 +------ registry/proxy/proxymanifeststore_test.go | 3 +- registry/proxy/scheduler/scheduler.go | 6 ++-- registry/registry.go | 2 +- registry/storage/blobstore.go | 10 ------ registry/storage/driver/azure/azure.go | 6 ++-- registry/storage/driver/inmemory/mfs.go | 14 -------- registry/storage/driver/s3-aws/s3.go | 36 +++++++++---------- .../storage/driver/s3-aws/s3_v2_signer.go | 6 ---- registry/storage/driver/swift/swift.go | 28 +++++++-------- registry/storage/linkedblobstore_test.go | 5 ++- registry/storage/manifeststore_test.go | 8 ++--- registry/storage/paths.go | 21 +++-------- registry/storage/paths_test.go | 1 - registry/storage/purgeuploads.go | 2 +- registry/storage/purgeuploads_test.go | 4 +-- registry/storage/schema2manifesthandler.go | 5 ++- registry/storage/tagstore.go | 19 ---------- registry/storage/util.go | 22 ------------ 47 files changed, 92 insertions(+), 240 deletions(-) delete mode 100644 registry/storage/util.go diff --git a/configuration/configuration.go b/configuration/configuration.go index 690b8ae3..34884dd2 100644 --- a/configuration/configuration.go +++ b/configuration/configuration.go @@ -391,7 +391,7 @@ func (loglevel *Loglevel) UnmarshalYAML(unmarshal func(interface{}) error) error switch loglevelString { case "error", "warn", "info", "debug": default: - return fmt.Errorf("Invalid loglevel %s Must be one of [error, warn, info, debug]", loglevelString) + return fmt.Errorf("invalid loglevel %s Must be one of [error, warn, info, debug]", loglevelString) } *loglevel = Loglevel(loglevelString) @@ -466,7 +466,7 @@ func (storage *Storage) UnmarshalYAML(unmarshal func(interface{}) error) error { } if len(types) > 1 { - return fmt.Errorf("Must provide exactly one storage type. Provided: %v", types) + return fmt.Errorf("must provide exactly one storage type. 
Provided: %v", types) } } *storage = storageMap @@ -668,11 +668,11 @@ func Parse(rd io.Reader) (*Configuration, error) { v0_1.Loglevel = Loglevel("") } if v0_1.Storage.Type() == "" { - return nil, errors.New("No storage configuration provided") + return nil, errors.New("no storage configuration provided") } return (*Configuration)(v0_1), nil } - return nil, fmt.Errorf("Expected *v0_1Configuration, received %#v", c) + return nil, fmt.Errorf("expected *v0_1Configuration, received %#v", c) }, }, }) diff --git a/configuration/configuration_test.go b/configuration/configuration_test.go index d57fbf4f..977d569a 100644 --- a/configuration/configuration_test.go +++ b/configuration/configuration_test.go @@ -542,9 +542,7 @@ func copyConfig(config Configuration) *Configuration { } configCopy.Notifications = Notifications{Endpoints: []Endpoint{}} - for _, v := range config.Notifications.Endpoints { - configCopy.Notifications.Endpoints = append(configCopy.Notifications.Endpoints, v) - } + configCopy.Notifications.Endpoints = append(configCopy.Notifications.Endpoints, config.Notifications.Endpoints...) configCopy.HTTP.Headers = make(http.Header) for k, v := range config.HTTP.Headers { diff --git a/configuration/parser.go b/configuration/parser.go index b46f7326..2bf59d21 100644 --- a/configuration/parser.go +++ b/configuration/parser.go @@ -122,7 +122,7 @@ func (p *Parser) Parse(in []byte, v interface{}) error { parseInfo, ok := p.mapping[versionedStruct.Version] if !ok { - return fmt.Errorf("Unsupported version: %q", versionedStruct.Version) + return fmt.Errorf("unsupported version: %q", versionedStruct.Version) } parseAs := reflect.New(parseInfo.ParseAs) diff --git a/health/api/api.go b/health/api/api.go index 73fcc453..a323a127 100644 --- a/health/api/api.go +++ b/health/api/api.go @@ -14,7 +14,7 @@ var ( // DownHandler registers a manual_http_status that always returns an Error func DownHandler(w http.ResponseWriter, r *http.Request) { if r.Method == "POST" { - updater.Update(errors.New("Manual Check")) + updater.Update(errors.New("manual Check")) } else { w.WriteHeader(http.StatusNotFound) } diff --git a/manifest/manifestlist/manifestlist.go b/manifest/manifestlist/manifestlist.go index f4e915ee..54c8f3c9 100644 --- a/manifest/manifestlist/manifestlist.go +++ b/manifest/manifestlist/manifestlist.go @@ -163,7 +163,7 @@ func FromDescriptorsWithMediaType(descriptors []ManifestDescriptor, mediaType st }, } - m.Manifests = make([]ManifestDescriptor, len(descriptors), len(descriptors)) + m.Manifests = make([]ManifestDescriptor, len(descriptors)) copy(m.Manifests, descriptors) deserialized := DeserializedManifestList{ @@ -177,7 +177,7 @@ func FromDescriptorsWithMediaType(descriptors []ManifestDescriptor, mediaType st // UnmarshalJSON populates a new ManifestList struct from JSON data. 
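The hunk below, like several others in this patch, drops a redundant capacity argument from `make`. A standalone sketch (not taken from this patch) showing why the two spellings are equivalent:

```go
package main

import "fmt"

func main() {
	b := []byte("canonical")
	// make([]byte, n) already yields a slice with len == cap == n, so
	// writing the capacity out as make([]byte, n, n) is redundant; this
	// is the simplification gometalint suggests and these hunks apply.
	buf := make([]byte, len(b))
	copy(buf, b)
	fmt.Println(len(buf), cap(buf)) // prints: 9 9
}
```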
func (m *DeserializedManifestList) UnmarshalJSON(b []byte) error { - m.canonical = make([]byte, len(b), len(b)) + m.canonical = make([]byte, len(b)) // store manifest list in canonical copy(m.canonical, b) diff --git a/manifest/ocischema/builder.go b/manifest/ocischema/builder.go index 023a8a0b..d90453bc 100644 --- a/manifest/ocischema/builder.go +++ b/manifest/ocischema/builder.go @@ -48,7 +48,7 @@ func NewManifestBuilder(bs distribution.BlobService, configJSON []byte, annotati // valid media type for oci image manifests currently: "" or "application/vnd.oci.image.manifest.v1+json" func (mb *Builder) SetMediaType(mediaType string) error { if mediaType != "" && mediaType != v1.MediaTypeImageManifest { - return errors.New("Invalid media type for OCI image manifest") + return errors.New("invalid media type for OCI image manifest") } mb.mediaType = mediaType diff --git a/manifest/ocischema/manifest.go b/manifest/ocischema/manifest.go index 09ce78b5..b8c4bab5 100644 --- a/manifest/ocischema/manifest.go +++ b/manifest/ocischema/manifest.go @@ -87,7 +87,7 @@ func FromStruct(m Manifest) (*DeserializedManifest, error) { // UnmarshalJSON populates a new Manifest struct from JSON data. func (m *DeserializedManifest) UnmarshalJSON(b []byte) error { - m.canonical = make([]byte, len(b), len(b)) + m.canonical = make([]byte, len(b)) // store manifest in canonical copy(m.canonical, b) diff --git a/manifest/schema1/manifest.go b/manifest/schema1/manifest.go index 5a06b54b..9fef4dc7 100644 --- a/manifest/schema1/manifest.go +++ b/manifest/schema1/manifest.go @@ -108,7 +108,7 @@ type SignedManifest struct { // UnmarshalJSON populates a new SignedManifest struct from JSON data. func (sm *SignedManifest) UnmarshalJSON(b []byte) error { - sm.all = make([]byte, len(b), len(b)) + sm.all = make([]byte, len(b)) // store manifest and signatures in all copy(sm.all, b) @@ -124,7 +124,7 @@ func (sm *SignedManifest) UnmarshalJSON(b []byte) error { } // sm.Canonical stores the canonical manifest JSON - sm.Canonical = make([]byte, len(bytes), len(bytes)) + sm.Canonical = make([]byte, len(bytes)) copy(sm.Canonical, bytes) // Unmarshal canonical JSON into Manifest object diff --git a/manifest/schema1/reference_builder.go b/manifest/schema1/reference_builder.go index a4f6032c..0f1d386a 100644 --- a/manifest/schema1/reference_builder.go +++ b/manifest/schema1/reference_builder.go @@ -58,7 +58,7 @@ func (mb *referenceManifestBuilder) Build(ctx context.Context) (distribution.Man func (mb *referenceManifestBuilder) AppendReference(d distribution.Describable) error { r, ok := d.(Reference) if !ok { - return fmt.Errorf("Unable to add non-reference type to v1 builder") + return fmt.Errorf("unable to add non-reference type to v1 builder") } // Entries need to be prepended diff --git a/manifest/schema2/manifest.go b/manifest/schema2/manifest.go index ee29438f..41f48029 100644 --- a/manifest/schema2/manifest.go +++ b/manifest/schema2/manifest.go @@ -106,7 +106,7 @@ func FromStruct(m Manifest) (*DeserializedManifest, error) { // UnmarshalJSON populates a new Manifest struct from JSON data. 
func (m *DeserializedManifest) UnmarshalJSON(b []byte) error { - m.canonical = make([]byte, len(b), len(b)) + m.canonical = make([]byte, len(b)) // store manifest in canonical copy(m.canonical, b) diff --git a/manifests.go b/manifests.go index 1816baea..8f84a220 100644 --- a/manifests.go +++ b/manifests.go @@ -87,7 +87,7 @@ func ManifestMediaTypes() (mediaTypes []string) { // UnmarshalFunc implements manifest unmarshalling a given MediaType type UnmarshalFunc func([]byte) (Manifest, Descriptor, error) -var mappings = make(map[string]UnmarshalFunc, 0) +var mappings = make(map[string]UnmarshalFunc) // UnmarshalManifest looks up manifest unmarshal functions based on // MediaType diff --git a/notifications/bridge.go b/notifications/bridge.go index eb9af41a..86af43f3 100644 --- a/notifications/bridge.go +++ b/notifications/bridge.go @@ -125,15 +125,6 @@ func (b *bridge) RepoDeleted(repo reference.Named) error { return b.sink.Write(*event) } -func (b *bridge) createManifestEventAndWrite(action string, repo reference.Named, sm distribution.Manifest) error { - manifestEvent, err := b.createManifestEvent(action, repo, sm) - if err != nil { - return err - } - - return b.sink.Write(*manifestEvent) -} - func (b *bridge) createManifestDeleteEventAndWrite(action string, repo reference.Named, dgst digest.Digest) error { event := b.createEvent(action) event.Target.Repository = repo.Name() diff --git a/notifications/event_test.go b/notifications/event_test.go index 0981a7ad..2dddf944 100644 --- a/notifications/event_test.go +++ b/notifications/event_test.go @@ -114,8 +114,7 @@ func TestEventEnvelopeJSONFormat(t *testing.T) { prototype.Request.UserAgent = "test/0.1" prototype.Source.Addr = "hostname.local:port" - var manifestPush Event - manifestPush = prototype + var manifestPush = prototype manifestPush.ID = "asdf-asdf-asdf-asdf-0" manifestPush.Target.Digest = "sha256:0123456789abcdef0" manifestPush.Target.Length = 1 @@ -124,8 +123,7 @@ func TestEventEnvelopeJSONFormat(t *testing.T) { manifestPush.Target.Repository = "library/test" manifestPush.Target.URL = "http://example.com/v2/library/test/manifests/latest" - var layerPush0 Event - layerPush0 = prototype + var layerPush0 = prototype layerPush0.ID = "asdf-asdf-asdf-asdf-1" layerPush0.Target.Digest = "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5" layerPush0.Target.Length = 2 @@ -134,8 +132,7 @@ func TestEventEnvelopeJSONFormat(t *testing.T) { layerPush0.Target.Repository = "library/test" layerPush0.Target.URL = "http://example.com/v2/library/test/manifests/latest" - var layerPush1 Event - layerPush1 = prototype + var layerPush1 = prototype layerPush1.ID = "asdf-asdf-asdf-asdf-2" layerPush1.Target.Digest = "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d6" layerPush1.Target.Length = 3 diff --git a/notifications/http.go b/notifications/http.go index 15751619..46f47af2 100644 --- a/notifications/http.go +++ b/notifications/http.go @@ -133,8 +133,7 @@ type headerRoundTripper struct { } func (hrt *headerRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - var nreq http.Request - nreq = *req + var nreq = *req nreq.Header = make(http.Header) merge := func(headers http.Header) { diff --git a/notifications/listener_test.go b/notifications/listener_test.go index ff49fd00..dee4dac5 100644 --- a/notifications/listener_test.go +++ b/notifications/listener_test.go @@ -136,11 +136,10 @@ func checkExerciseRepository(t *testing.T, repository distribution.Repository, r var blobDigests []digest.Digest 
blobs := repository.Blobs(ctx) for i := 0; i < 2; i++ { - rs, ds, err := testutil.CreateRandomTarFile() + rs, dgst, err := testutil.CreateRandomTarFile() if err != nil { t.Fatalf("error creating test layer: %v", err) } - dgst := digest.Digest(ds) blobDigests = append(blobDigests, dgst) wr, err := blobs.Create(ctx) diff --git a/notifications/sinks.go b/notifications/sinks.go index 14b692a3..816f75d7 100644 --- a/notifications/sinks.go +++ b/notifications/sinks.go @@ -284,11 +284,6 @@ type retryingSink struct { } } -type retryingSinkListener interface { - active(events ...Event) - retry(events ...Event) -} - // TODO(stevvooe): We are using circuit break here, which actually doesn't // make a whole lot of sense for this use case, since we always retry. Move // this to use bounded exponential backoff. diff --git a/reference/reference.go b/reference/reference.go index 2f66cca8..8c0c23b2 100644 --- a/reference/reference.go +++ b/reference/reference.go @@ -205,7 +205,7 @@ func Parse(s string) (Reference, error) { var repo repository nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) - if nameMatch != nil && len(nameMatch) == 3 { + if len(nameMatch) == 3 { repo.domain = nameMatch[1] repo.path = nameMatch[2] } else { diff --git a/registry/api/errcode/errors.go b/registry/api/errcode/errors.go index 6d9bb4b6..4c35b879 100644 --- a/registry/api/errcode/errors.go +++ b/registry/api/errcode/errors.go @@ -207,11 +207,11 @@ func (errs Errors) MarshalJSON() ([]byte, error) { for _, daErr := range errs { var err Error - switch daErr.(type) { + switch daErr := daErr.(type) { case ErrorCode: - err = daErr.(ErrorCode).WithDetail(nil) + err = daErr.WithDetail(nil) case Error: - err = daErr.(Error) + err = daErr default: err = ErrorCodeUnknown.WithDetail(daErr) diff --git a/registry/api/v2/urls.go b/registry/api/v2/urls.go index 1337bdb1..3c3ec989 100644 --- a/registry/api/v2/urls.go +++ b/registry/api/v2/urls.go @@ -252,15 +252,3 @@ func appendValuesURL(u *url.URL, values ...url.Values) *url.URL { u.RawQuery = merged.Encode() return u } - -// appendValues appends the parameters to the url. Panics if the string is not -// a url. 
-func appendValues(u string, values ...url.Values) string { - up, err := url.Parse(u) - - if err != nil { - panic(err) // should never happen - } - - return appendValuesURL(up, values...).String() -} diff --git a/registry/api/v2/urls_test.go b/registry/api/v2/urls_test.go index 4f854b23..fbf9d0d4 100644 --- a/registry/api/v2/urls_test.go +++ b/registry/api/v2/urls_test.go @@ -182,11 +182,6 @@ func TestURLBuilderWithPrefix(t *testing.T) { doTest(false) } -type builderFromRequestTestCase struct { - request *http.Request - base string -} - func TestBuilderFromRequest(t *testing.T) { u, err := url.Parse("http://example.com") if err != nil { diff --git a/registry/client/auth/challenge/authchallenge.go b/registry/client/auth/challenge/authchallenge.go index 6e3f1ccc..fe238210 100644 --- a/registry/client/auth/challenge/authchallenge.go +++ b/registry/client/auth/challenge/authchallenge.go @@ -117,8 +117,8 @@ func init() { var t octetType isCtl := c <= 31 || c == 127 isChar := 0 <= c && c <= 127 - isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0 - if strings.IndexRune(" \t\r\n", rune(c)) >= 0 { + isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) + if strings.ContainsRune(" \t\r\n", rune(c)) { t |= isSpace } if isChar && !isCtl && !isSeparator { diff --git a/registry/client/repository_test.go b/registry/client/repository_test.go index 88e7e4b5..53b53cce 100644 --- a/registry/client/repository_test.go +++ b/registry/client/repository_test.go @@ -152,7 +152,7 @@ func TestBlobFetch(t *testing.T) { if err != nil { t.Fatal(err) } - if bytes.Compare(b, b1) != 0 { + if !bytes.Equal(b, b1) { t.Fatalf("Wrong bytes values fetched: [%d]byte != [%d]byte", len(b), len(b1)) } diff --git a/registry/handlers/api_test.go b/registry/handlers/api_test.go index 2544efba..092f7be2 100644 --- a/registry/handlers/api_test.go +++ b/registry/handlers/api_test.go @@ -959,7 +959,6 @@ func testManifestWithStorageError(t *testing.T, env *testEnv, imageName referenc defer resp.Body.Close() checkResponse(t, "getting non-existent manifest", resp, expectedStatusCode) checkBodyHasErrorCodes(t, "getting non-existent manifest", resp, expectedErrorCode) - return } func testManifestAPISchema1(t *testing.T, env *testEnv, imageName reference.Named) manifestArgs { @@ -1066,12 +1065,11 @@ func testManifestAPISchema1(t *testing.T, env *testEnv, imageName reference.Name expectedLayers := make(map[digest.Digest]io.ReadSeeker) for i := range unsignedManifest.FSLayers { - rs, dgstStr, err := testutil.CreateRandomTarFile() + rs, dgst, err := testutil.CreateRandomTarFile() if err != nil { t.Fatalf("error creating random layer %d: %v", i, err) } - dgst := digest.Digest(dgstStr) expectedLayers[dgst] = rs unsignedManifest.FSLayers[i].BlobSum = dgst @@ -1405,12 +1403,11 @@ func testManifestAPISchema2(t *testing.T, env *testEnv, imageName reference.Name expectedLayers := make(map[digest.Digest]io.ReadSeeker) for i := range manifest.Layers { - rs, dgstStr, err := testutil.CreateRandomTarFile() + rs, dgst, err := testutil.CreateRandomTarFile() if err != nil { t.Fatalf("error creating random layer %d: %v", i, err) } - dgst := digest.Digest(dgstStr) expectedLayers[dgst] = rs manifest.Layers[i].Digest = dgst @@ -2432,11 +2429,10 @@ func createRepository(env *testEnv, t *testing.T, imageName string, tag string) expectedLayers := make(map[digest.Digest]io.ReadSeeker) for i := range unsignedManifest.FSLayers { - rs, dgstStr, err := testutil.CreateRandomTarFile() + rs, dgst, err := testutil.CreateRandomTarFile() if 
err != nil { t.Fatalf("error creating random layer %d: %v", i, err) } - dgst := digest.Digest(dgstStr) expectedLayers[dgst] = rs unsignedManifest.FSLayers[i].BlobSum = dgst diff --git a/registry/handlers/app.go b/registry/handlers/app.go index 978851bb..5980753b 100644 --- a/registry/handlers/app.go +++ b/registry/handlers/app.go @@ -753,20 +753,18 @@ func (app *App) logError(ctx context.Context, errors errcode.Errors) { for _, e1 := range errors { var c context.Context - switch e1.(type) { + switch e := e1.(type) { case errcode.Error: - e, _ := e1.(errcode.Error) c = context.WithValue(ctx, errCodeKey{}, e.Code) c = context.WithValue(c, errMessageKey{}, e.Message) c = context.WithValue(c, errDetailKey{}, e.Detail) case errcode.ErrorCode: - e, _ := e1.(errcode.ErrorCode) c = context.WithValue(ctx, errCodeKey{}, e) c = context.WithValue(c, errMessageKey{}, e.Message()) default: // just normal go 'error' c = context.WithValue(ctx, errCodeKey{}, errcode.ErrorCodeUnknown) - c = context.WithValue(c, errMessageKey{}, e1.Error()) + c = context.WithValue(c, errMessageKey{}, e.Error()) } c = dcontext.WithLogger(c, dcontext.GetLogger(c, diff --git a/registry/handlers/blobupload.go b/registry/handlers/blobupload.go index 49ab1aaa..b6bfe302 100644 --- a/registry/handlers/blobupload.go +++ b/registry/handlers/blobupload.go @@ -172,7 +172,7 @@ func (buh *blobUploadHandler) PatchBlobData(w http.ResponseWriter, r *http.Reque ct := r.Header.Get("Content-Type") if ct != "" && ct != "application/octet-stream" { - buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(fmt.Errorf("Bad Content-Type"))) + buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(fmt.Errorf("bad Content-Type"))) // TODO(dmcgowan): encode error return } diff --git a/registry/handlers/hooks.go b/registry/handlers/hooks.go index e51df2b7..4a2b1be8 100644 --- a/registry/handlers/hooks.go +++ b/registry/handlers/hooks.go @@ -20,7 +20,7 @@ type logHook struct { func (hook *logHook) Fire(entry *logrus.Entry) error { addr := strings.Split(hook.Mail.Addr, ":") if len(addr) != 2 { - return errors.New("Invalid Mail Address") + return errors.New("invalid Mail Address") } host := addr[0] subject := fmt.Sprintf("[%s] %s: %s", entry.Level, host, entry.Message) @@ -37,7 +37,7 @@ func (hook *logHook) Fire(entry *logrus.Entry) error { if err := t.Execute(b, entry); err != nil { return err } - body := fmt.Sprintf("%s", b) + body := b.String() return hook.Mail.sendMail(subject, body) } diff --git a/registry/handlers/mail.go b/registry/handlers/mail.go index 9e884265..684b1b34 100644 --- a/registry/handlers/mail.go +++ b/registry/handlers/mail.go @@ -17,7 +17,7 @@ type mailer struct { func (mail *mailer) sendMail(subject, message string) error { addr := strings.Split(mail.Addr, ":") if len(addr) != 2 { - return errors.New("Invalid Mail Address") + return errors.New("invalid Mail Address") } host := addr[0] msg := []byte("To:" + strings.Join(mail.To, ";") + diff --git a/registry/proxy/proxyblobstore.go b/registry/proxy/proxyblobstore.go index fc59552b..9ca9c968 100644 --- a/registry/proxy/proxyblobstore.go +++ b/registry/proxy/proxyblobstore.go @@ -6,7 +6,6 @@ import ( "net/http" "strconv" "sync" - "time" "github.com/docker/distribution" dcontext "github.com/docker/distribution/context" @@ -15,9 +14,6 @@ import ( "github.com/opencontainers/go-digest" ) -// todo(richardscothern): from cache control header or config file -const blobTTL = 24 * 7 * time.Hour - type proxyBlobStore struct { localStore distribution.BlobStore remoteStore 
distribution.BlobService diff --git a/registry/proxy/proxyblobstore_test.go b/registry/proxy/proxyblobstore_test.go index 4bf4ea8a..6e90dbe5 100644 --- a/registry/proxy/proxyblobstore_test.go +++ b/registry/proxy/proxyblobstore_test.go @@ -193,7 +193,7 @@ func makeTestEnv(t *testing.T, name string) *testEnv { } func makeBlob(size int) []byte { - blob := make([]byte, size, size) + blob := make([]byte, size) for i := 0; i < size; i++ { blob[i] = byte('A' + rand.Int()%48) } @@ -204,16 +204,6 @@ func init() { rand.Seed(42) } -func perm(m []distribution.Descriptor) []distribution.Descriptor { - for i := 0; i < len(m); i++ { - j := rand.Intn(i + 1) - tmp := m[i] - m[i] = m[j] - m[j] = tmp - } - return m -} - func populate(t *testing.T, te *testEnv, blobCount, size, numUnique int) { var inRemote []distribution.Descriptor diff --git a/registry/proxy/proxymanifeststore_test.go b/registry/proxy/proxymanifeststore_test.go index 99aaed28..82a1a2b7 100644 --- a/registry/proxy/proxymanifeststore_test.go +++ b/registry/proxy/proxymanifeststore_test.go @@ -165,11 +165,10 @@ func populateRepo(ctx context.Context, t *testing.T, repository distribution.Rep t.Fatalf("unexpected error creating test upload: %v", err) } - rs, ts, err := testutil.CreateRandomTarFile() + rs, dgst, err := testutil.CreateRandomTarFile() if err != nil { t.Fatalf("unexpected error generating test layer file") } - dgst := digest.Digest(ts) if _, err := io.Copy(wr, rs); err != nil { t.Fatalf("unexpected error copying to upload: %v", err) } diff --git a/registry/proxy/scheduler/scheduler.go b/registry/proxy/scheduler/scheduler.go index f8340184..f3c1caa0 100644 --- a/registry/proxy/scheduler/scheduler.go +++ b/registry/proxy/scheduler/scheduler.go @@ -118,7 +118,7 @@ func (ttles *TTLExpirationScheduler) Start() error { } if !ttles.stopped { - return fmt.Errorf("Scheduler already started") + return fmt.Errorf("scheduler already started") } dcontext.GetLogger(ttles.ctx).Infof("Starting cached object TTL expiration scheduler...") @@ -126,7 +126,7 @@ func (ttles *TTLExpirationScheduler) Start() error { // Start timer for each deserialized entry for _, entry := range ttles.entries { - entry.timer = ttles.startTimer(entry, entry.Expiry.Sub(time.Now())) + entry.timer = ttles.startTimer(entry, time.Until(entry.Expiry)) } // Start a ticker to periodically save the entries index @@ -164,7 +164,7 @@ func (ttles *TTLExpirationScheduler) add(r reference.Reference, ttl time.Duratio Expiry: time.Now().Add(ttl), EntryType: eType, } - dcontext.GetLogger(ttles.ctx).Infof("Adding new scheduler entry for %s with ttl=%s", entry.Key, entry.Expiry.Sub(time.Now())) + dcontext.GetLogger(ttles.ctx).Infof("Adding new scheduler entry for %s with ttl=%s", entry.Key, time.Until(entry.Expiry)) if oldEntry, present := ttles.entries[entry.Key]; present && oldEntry.timer != nil { oldEntry.timer.Stop() } diff --git a/registry/registry.go b/registry/registry.go index fd50b46a..03ff3fd7 100644 --- a/registry/registry.go +++ b/registry/registry.go @@ -201,7 +201,7 @@ func (registry *Registry) ListenAndServe() error { } if ok := pool.AppendCertsFromPEM(caPem); !ok { - return fmt.Errorf("Could not add CA to pool") + return fmt.Errorf("could not add CA to pool") } } diff --git a/registry/storage/blobstore.go b/registry/storage/blobstore.go index 9fdf5219..1008aad8 100644 --- a/registry/storage/blobstore.go +++ b/registry/storage/blobstore.go @@ -152,16 +152,6 @@ func (bs *blobStore) readlink(ctx context.Context, path string) (digest.Digest, return linked, nil } -// resolve 
reads the digest link at path and returns the blob store path. -func (bs *blobStore) resolve(ctx context.Context, path string) (string, error) { - dgst, err := bs.readlink(ctx, path) - if err != nil { - return "", err - } - - return bs.path(dgst) -} - type blobStatter struct { driver driver.StorageDriver } diff --git a/registry/storage/driver/azure/azure.go b/registry/storage/driver/azure/azure.go index 906e7792..58c6293b 100644 --- a/registry/storage/driver/azure/azure.go +++ b/registry/storage/driver/azure/azure.go @@ -55,17 +55,17 @@ func (factory *azureDriverFactory) Create(parameters map[string]interface{}) (st func FromParameters(parameters map[string]interface{}) (*Driver, error) { accountName, ok := parameters[paramAccountName] if !ok || fmt.Sprint(accountName) == "" { - return nil, fmt.Errorf("No %s parameter provided", paramAccountName) + return nil, fmt.Errorf("no %s parameter provided", paramAccountName) } accountKey, ok := parameters[paramAccountKey] if !ok || fmt.Sprint(accountKey) == "" { - return nil, fmt.Errorf("No %s parameter provided", paramAccountKey) + return nil, fmt.Errorf("no %s parameter provided", paramAccountKey) } container, ok := parameters[paramContainer] if !ok || fmt.Sprint(container) == "" { - return nil, fmt.Errorf("No %s parameter provided", paramContainer) + return nil, fmt.Errorf("no %s parameter provided", paramContainer) } realm, ok := parameters[paramRealm] diff --git a/registry/storage/driver/inmemory/mfs.go b/registry/storage/driver/inmemory/mfs.go index cdefacfd..9a2865f9 100644 --- a/registry/storage/driver/inmemory/mfs.go +++ b/registry/storage/driver/inmemory/mfs.go @@ -252,20 +252,6 @@ func (d *dir) delete(p string) error { return nil } -// dump outputs a primitive directory structure to stdout. -func (d *dir) dump(indent string) { - fmt.Println(indent, d.name()+"/") - - for _, child := range d.children { - if child.isdir() { - child.(*dir).dump(indent + "\t") - } else { - fmt.Println(indent, child.name()) - } - - } -} - func (d *dir) String() string { return fmt.Sprintf("&dir{path: %v, children: %v}", d.p, d.children) } diff --git a/registry/storage/driver/s3-aws/s3.go b/registry/storage/driver/s3-aws/s3.go index 800435d0..126a07f6 100644 --- a/registry/storage/driver/s3-aws/s3.go +++ b/registry/storage/driver/s3-aws/s3.go @@ -188,19 +188,19 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { regionName := parameters["region"] if regionName == nil || fmt.Sprint(regionName) == "" { - return nil, fmt.Errorf("No region parameter provided") + return nil, fmt.Errorf("no region parameter provided") } region := fmt.Sprint(regionName) // Don't check the region value if a custom endpoint is provided. 
if regionEndpoint == "" { if _, ok := validRegions[region]; !ok { - return nil, fmt.Errorf("Invalid region provided: %v", region) + return nil, fmt.Errorf("invalid region provided: %v", region) } } bucket := parameters["bucket"] if bucket == nil || fmt.Sprint(bucket) == "" { - return nil, fmt.Errorf("No bucket parameter provided") + return nil, fmt.Errorf("no bucket parameter provided") } encryptBool := false @@ -209,7 +209,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { case string: b, err := strconv.ParseBool(encrypt) if err != nil { - return nil, fmt.Errorf("The encrypt parameter should be a boolean") + return nil, fmt.Errorf("the encrypt parameter should be a boolean") } encryptBool = b case bool: @@ -217,7 +217,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { case nil: // do nothing default: - return nil, fmt.Errorf("The encrypt parameter should be a boolean") + return nil, fmt.Errorf("the encrypt parameter should be a boolean") } secureBool := true @@ -226,7 +226,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { case string: b, err := strconv.ParseBool(secure) if err != nil { - return nil, fmt.Errorf("The secure parameter should be a boolean") + return nil, fmt.Errorf("the secure parameter should be a boolean") } secureBool = b case bool: @@ -234,7 +234,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { case nil: // do nothing default: - return nil, fmt.Errorf("The secure parameter should be a boolean") + return nil, fmt.Errorf("the secure parameter should be a boolean") } skipVerifyBool := false @@ -243,7 +243,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { case string: b, err := strconv.ParseBool(skipVerify) if err != nil { - return nil, fmt.Errorf("The skipVerify parameter should be a boolean") + return nil, fmt.Errorf("the skipVerify parameter should be a boolean") } skipVerifyBool = b case bool: @@ -251,7 +251,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { case nil: // do nothing default: - return nil, fmt.Errorf("The skipVerify parameter should be a boolean") + return nil, fmt.Errorf("the skipVerify parameter should be a boolean") } v4Bool := true @@ -260,7 +260,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { case string: b, err := strconv.ParseBool(v4auth) if err != nil { - return nil, fmt.Errorf("The v4auth parameter should be a boolean") + return nil, fmt.Errorf("the v4auth parameter should be a boolean") } v4Bool = b case bool: @@ -268,7 +268,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { case nil: // do nothing default: - return nil, fmt.Errorf("The v4auth parameter should be a boolean") + return nil, fmt.Errorf("the v4auth parameter should be a boolean") } keyID := parameters["keyid"] @@ -306,7 +306,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { if storageClassParam != nil { storageClassString, ok := storageClassParam.(string) if !ok { - return nil, fmt.Errorf("The storageclass parameter must be one of %v, %v invalid", + return nil, fmt.Errorf("the storageclass parameter must be one of %v, %v invalid", []string{s3.StorageClassStandard, s3.StorageClassReducedRedundancy}, storageClassParam) } // All valid storage class parameters are UPPERCASE, so be a bit more flexible here @@ -314,7 +314,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { if storageClassString != 
noStorageClass && storageClassString != s3.StorageClassStandard && storageClassString != s3.StorageClassReducedRedundancy { - return nil, fmt.Errorf("The storageclass parameter must be one of %v, %v invalid", + return nil, fmt.Errorf("the storageclass parameter must be one of %v, %v invalid", []string{noStorageClass, s3.StorageClassStandard, s3.StorageClassReducedRedundancy}, storageClassParam) } storageClass = storageClassString @@ -330,11 +330,11 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { if objectACLParam != nil { objectACLString, ok := objectACLParam.(string) if !ok { - return nil, fmt.Errorf("Invalid value for objectacl parameter: %v", objectACLParam) + return nil, fmt.Errorf("invalid value for objectacl parameter: %v", objectACLParam) } if _, ok = validObjectACLs[objectACLString]; !ok { - return nil, fmt.Errorf("Invalid value for objectacl parameter: %v", objectACLParam) + return nil, fmt.Errorf("invalid value for objectacl parameter: %v", objectACLParam) } objectACL = objectACLString } @@ -389,7 +389,7 @@ func getParameterAsInt64(parameters map[string]interface{}, name string, default } if rv < min || rv > max { - return 0, fmt.Errorf("The %s %#v parameter should be a number between %d and %d (inclusive)", name, rv, min, max) + return 0, fmt.Errorf("the %s %#v parameter should be a number between %d and %d (inclusive)", name, rv, min, max) } return rv, nil @@ -401,7 +401,7 @@ func New(params DriverParameters) (*Driver, error) { if !params.V4Auth && (params.RegionEndpoint == "" || strings.Contains(params.RegionEndpoint, "s3.amazonaws.com")) { - return nil, fmt.Errorf("On Amazon S3 this storage driver can only be used with v4 authentication") + return nil, fmt.Errorf("on Amazon S3 this storage driver can only be used with v4 authentication") } awsConfig := aws.NewConfig() @@ -878,7 +878,7 @@ func (d *driver) URLFor(ctx context.Context, path string, options map[string]int if ok { et, ok := expires.(time.Time) if ok { - expiresIn = et.Sub(time.Now()) + expiresIn = time.Until(et) } } diff --git a/registry/storage/driver/s3-aws/s3_v2_signer.go b/registry/storage/driver/s3-aws/s3_v2_signer.go index 29688ae5..9f1e621c 100644 --- a/registry/storage/driver/s3-aws/s3_v2_signer.go +++ b/registry/storage/driver/s3-aws/s3_v2_signer.go @@ -39,12 +39,6 @@ import ( log "github.com/sirupsen/logrus" ) -const ( - signatureVersion = "2" - signatureMethod = "HmacSHA1" - timeFormat = "2006-01-02T15:04:05Z" -) - type signer struct { // Values that must be populated from the request Request *http.Request diff --git a/registry/storage/driver/swift/swift.go b/registry/storage/driver/swift/swift.go index 46201ee7..adbe8aa9 100644 --- a/registry/storage/driver/swift/swift.go +++ b/registry/storage/driver/swift/swift.go @@ -160,23 +160,23 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { } if params.Username == "" { - return nil, fmt.Errorf("No username parameter provided") + return nil, fmt.Errorf("no username parameter provided") } if params.Password == "" { - return nil, fmt.Errorf("No password parameter provided") + return nil, fmt.Errorf("no password parameter provided") } if params.AuthURL == "" { - return nil, fmt.Errorf("No authurl parameter provided") + return nil, fmt.Errorf("no authurl parameter provided") } if params.Container == "" { - return nil, fmt.Errorf("No container parameter provided") + return nil, fmt.Errorf("no container parameter provided") } if params.ChunkSize < minChunkSize { - return nil, fmt.Errorf("The chunksize %#v 
parameter should be a number that is larger than or equal to %d", params.ChunkSize, minChunkSize) + return nil, fmt.Errorf("the chunksize %#v parameter should be a number that is larger than or equal to %d", params.ChunkSize, minChunkSize) } return New(params) @@ -211,15 +211,15 @@ func New(params Parameters) (*Driver, error) { } err := ct.Authenticate() if err != nil { - return nil, fmt.Errorf("Swift authentication failed: %s", err) + return nil, fmt.Errorf("swift authentication failed: %s", err) } if _, _, err := ct.Container(params.Container); err == swift.ContainerNotFound { if err := ct.ContainerCreate(params.Container, nil); err != nil { - return nil, fmt.Errorf("Failed to create container %s (%s)", params.Container, err) + return nil, fmt.Errorf("failed to create container %s (%s)", params.Container, err) } } else if err != nil { - return nil, fmt.Errorf("Failed to retrieve info about container %s (%s)", params.Container, err) + return nil, fmt.Errorf("failed to retrieve info about container %s (%s)", params.Container, err) } d := &driver{ @@ -258,7 +258,7 @@ func New(params Parameters) (*Driver, error) { if d.TempURLContainerKey { _, containerHeaders, err := d.Conn.Container(d.Container) if err != nil { - return nil, fmt.Errorf("Failed to fetch container info %s (%s)", d.Container, err) + return nil, fmt.Errorf("failed to fetch container info %s (%s)", d.Container, err) } d.SecretKey = containerHeaders["X-Container-Meta-Temp-Url-Key"] @@ -273,7 +273,7 @@ func New(params Parameters) (*Driver, error) { // Use the account secret key _, accountHeaders, err := d.Conn.Account() if err != nil { - return nil, fmt.Errorf("Failed to fetch account info (%s)", err) + return nil, fmt.Errorf("failed to fetch account info (%s)", err) } d.SecretKey = accountHeaders["X-Account-Meta-Temp-Url-Key"] @@ -350,7 +350,7 @@ func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.Read } if isDLO && size == 0 { if time.Now().Add(waitingTime).After(endTime) { - return nil, fmt.Errorf("Timeout expired while waiting for segments of %s to show up", path) + return nil, fmt.Errorf("timeout expired while waiting for segments of %s to show up", path) } time.Sleep(waitingTime) waitingTime *= 2 @@ -456,7 +456,7 @@ func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, _, isDLO := headers["X-Object-Manifest"] if isDLO && info.Bytes == 0 { if time.Now().Add(waitingTime).After(endTime) { - return nil, fmt.Errorf("Timeout expired while waiting for segments of %s to show up", path) + return nil, fmt.Errorf("timeout expired while waiting for segments of %s to show up", path) } time.Sleep(waitingTime) waitingTime *= 2 @@ -755,7 +755,7 @@ func chunkFilenames(slice []string, maxSize int) (chunks [][]string, err error) chunks = append(chunks, slice[offset:offset+chunkSize]) } } else { - return nil, fmt.Errorf("Max chunk size must be > 0") + return nil, fmt.Errorf("max chunk size must be > 0") } return } @@ -894,7 +894,7 @@ func (w *writer) waitForSegmentsToShowUp() error { if info.Bytes == w.size { break } - err = fmt.Errorf("Timeout expired while waiting for segments of %s to show up", w.path) + err = fmt.Errorf("timeout expired while waiting for segments of %s to show up", w.path) } if time.Now().Add(waitingTime).After(endTime) { break diff --git a/registry/storage/linkedblobstore_test.go b/registry/storage/linkedblobstore_test.go index e0ffd279..7682b45c 100644 --- a/registry/storage/linkedblobstore_test.go +++ b/registry/storage/linkedblobstore_test.go @@ -27,13 +27,12 @@ func 
TestLinkedBlobStoreCreateWithMountFrom(t *testing.T) { // readseekers for upload later. testLayers := map[digest.Digest]io.ReadSeeker{} for i := 0; i < 2; i++ { - rs, ds, err := testutil.CreateRandomTarFile() + rs, dgst, err := testutil.CreateRandomTarFile() if err != nil { t.Fatalf("unexpected error generating test layer file") } - dgst := digest.Digest(ds) - testLayers[digest.Digest(dgst)] = rs + testLayers[dgst] = rs } // upload the layers to foo/bar diff --git a/registry/storage/manifeststore_test.go b/registry/storage/manifeststore_test.go index 2b3177b8..cfe18c12 100644 --- a/registry/storage/manifeststore_test.go +++ b/registry/storage/manifeststore_test.go @@ -91,13 +91,12 @@ func testManifestStorage(t *testing.T, schema1Enabled bool, options ...RegistryO // readseekers for upload later. testLayers := map[digest.Digest]io.ReadSeeker{} for i := 0; i < 2; i++ { - rs, ds, err := testutil.CreateRandomTarFile() + rs, dgst, err := testutil.CreateRandomTarFile() if err != nil { t.Fatalf("unexpected error generating test layer file") } - dgst := digest.Digest(ds) - testLayers[digest.Digest(dgst)] = rs + testLayers[dgst] = rs m.FSLayers = append(m.FSLayers, schema1.FSLayer{ BlobSum: dgst, }) @@ -414,11 +413,10 @@ func testOCIManifestStorage(t *testing.T, testname string, includeMediaTypes boo // Add some layers for i := 0; i < 2; i++ { - rs, ds, err := testutil.CreateRandomTarFile() + rs, dgst, err := testutil.CreateRandomTarFile() if err != nil { t.Fatalf("%s: unexpected error generating test layer file", testname) } - dgst := digest.Digest(ds) wr, err := env.repository.Blobs(env.ctx).Create(env.ctx) if err != nil { diff --git a/registry/storage/paths.go b/registry/storage/paths.go index b6d9b9b5..2d1f5132 100644 --- a/registry/storage/paths.go +++ b/registry/storage/paths.go @@ -133,10 +133,7 @@ func pathFor(spec pathSpec) (string, error) { return path.Join(append(append(repoPrefix, v.name, "_manifests", "revisions"), components...)...), nil case manifestRevisionLinkPathSpec: - root, err := pathFor(manifestRevisionPathSpec{ - name: v.name, - revision: v.revision, - }) + root, err := pathFor(manifestRevisionPathSpec(v)) if err != nil { return "", err @@ -156,10 +153,7 @@ func pathFor(spec pathSpec) (string, error) { return path.Join(root, v.tag), nil case manifestTagCurrentPathSpec: - root, err := pathFor(manifestTagPathSpec{ - name: v.name, - tag: v.tag, - }) + root, err := pathFor(manifestTagPathSpec(v)) if err != nil { return "", err @@ -167,10 +161,7 @@ func pathFor(spec pathSpec) (string, error) { return path.Join(root, "current", "link"), nil case manifestTagIndexPathSpec: - root, err := pathFor(manifestTagPathSpec{ - name: v.name, - tag: v.tag, - }) + root, err := pathFor(manifestTagPathSpec(v)) if err != nil { return "", err @@ -178,11 +169,7 @@ func pathFor(spec pathSpec) (string, error) { return path.Join(root, "index"), nil case manifestTagIndexEntryLinkPathSpec: - root, err := pathFor(manifestTagIndexEntryPathSpec{ - name: v.name, - tag: v.tag, - revision: v.revision, - }) + root, err := pathFor(manifestTagIndexEntryPathSpec(v)) if err != nil { return "", err diff --git a/registry/storage/paths_test.go b/registry/storage/paths_test.go index 677a34b9..ab3b8445 100644 --- a/registry/storage/paths_test.go +++ b/registry/storage/paths_test.go @@ -10,7 +10,6 @@ func TestPathMapper(t *testing.T) { for _, testcase := range []struct { spec pathSpec expected string - err error }{ { spec: manifestRevisionPathSpec{ diff --git a/registry/storage/purgeuploads.go 
b/registry/storage/purgeuploads.go index 7ebd1604..cac92121 100644 --- a/registry/storage/purgeuploads.go +++ b/registry/storage/purgeuploads.go @@ -59,7 +59,7 @@ func PurgeUploads(ctx context.Context, driver storageDriver.StorageDriver, older // file, so gather files by UUID with a date from startedAt. func getOutstandingUploads(ctx context.Context, driver storageDriver.StorageDriver) (map[string]uploadData, []error) { var errors []error - uploads := make(map[string]uploadData, 0) + uploads := make(map[string]uploadData) inUploadDir := false root, err := pathFor(repositoriesRootPathSpec{}) diff --git a/registry/storage/purgeuploads_test.go b/registry/storage/purgeuploads_test.go index 23c553ba..398df2b6 100644 --- a/registry/storage/purgeuploads_test.go +++ b/registry/storage/purgeuploads_test.go @@ -118,7 +118,7 @@ func TestPurgeOnlyUploads(t *testing.T) { t.Fatalf(err.Error()) } nonUploadPath := strings.Replace(dataPath, "_upload", "_important", -1) - if strings.Index(nonUploadPath, "_upload") != -1 { + if strings.Contains(nonUploadPath, "_upload") { t.Fatalf("Non-upload path not created correctly") } @@ -132,7 +132,7 @@ func TestPurgeOnlyUploads(t *testing.T) { t.Error("Unexpected errors", errs) } for _, file := range deleted { - if strings.Index(file, "_upload") == -1 { + if !strings.Contains(file, "_upload") { t.Errorf("Non-upload file deleted") } } diff --git a/registry/storage/schema2manifesthandler.go b/registry/storage/schema2manifesthandler.go index acb3255b..25d9d84f 100644 --- a/registry/storage/schema2manifesthandler.go +++ b/registry/storage/schema2manifesthandler.go @@ -14,9 +14,8 @@ import ( ) var ( - errUnexpectedURL = errors.New("unexpected URL on layer") - errMissingURL = errors.New("missing URL on layer") - errInvalidURL = errors.New("invalid URL on layer") + errMissingURL = errors.New("missing URL on layer") + errInvalidURL = errors.New("invalid URL on layer") ) //schema2ManifestHandler is a ManifestHandler that covers schema2 manifests. diff --git a/registry/storage/tagstore.go b/registry/storage/tagstore.go index f80b9628..a3c766b4 100644 --- a/registry/storage/tagstore.go +++ b/registry/storage/tagstore.go @@ -50,25 +50,6 @@ func (ts *tagStore) All(ctx context.Context) ([]string, error) { return tags, nil } -// exists returns true if the specified manifest tag exists in the repository. -func (ts *tagStore) exists(ctx context.Context, tag string) (bool, error) { - tagPath, err := pathFor(manifestTagCurrentPathSpec{ - name: ts.repository.Named().Name(), - tag: tag, - }) - - if err != nil { - return false, err - } - - exists, err := exists(ctx, ts.blobStore.driver, tagPath) - if err != nil { - return false, err - } - - return exists, nil -} - // Tag tags the digest with the given tag, updating the the store to point at // the current tag. The digest must point to a manifest. func (ts *tagStore) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { diff --git a/registry/storage/util.go b/registry/storage/util.go deleted file mode 100644 index 8ab235e2..00000000 --- a/registry/storage/util.go +++ /dev/null @@ -1,22 +0,0 @@ -package storage - -import ( - "context" - - "github.com/docker/distribution/registry/storage/driver" -) - -// Exists provides a utility method to test whether or not a path exists in -// the given driver. 
-func exists(ctx context.Context, drv driver.StorageDriver, path string) (bool, error) {
-	if _, err := drv.Stat(ctx, path); err != nil {
-		switch err := err.(type) {
-		case driver.PathNotFoundError:
-			return false, nil
-		default:
-			return false, err
-		}
-	}
-
-	return true, nil
-}

From 48818fdea7d7b8c34ed1e4284e638007ebc2e5d7 Mon Sep 17 00:00:00 2001
From: Manish Tomar
Date: Mon, 4 Feb 2019 16:42:44 -0800
Subject: [PATCH 193/276] Remove err nil check since a type assertion on a
 nil error will not panic and returns appropriately

Signed-off-by: Manish Tomar
---
 registry/storage/garbagecollect.go | 16 +++++++---------
 1 file changed, 7 insertions(+), 9 deletions(-)

diff --git a/registry/storage/garbagecollect.go b/registry/storage/garbagecollect.go
index 48d428a8..317c792d 100644
--- a/registry/storage/garbagecollect.go
+++ b/registry/storage/garbagecollect.go
@@ -98,15 +98,13 @@ func MarkAndSweep(ctx context.Context, storageDriver driver.StorageDriver, regis
 			return nil
 		})
 
-		if err != nil {
-			// In certain situations such as unfinished uploads, deleting all
-			// tags in S3 or removing the _manifests folder manually, this
-			// error may be of type PathNotFound.
-			//
-			// In these cases we can continue marking other manifests safely.
-			if _, ok := err.(driver.PathNotFoundError); ok {
-				return nil
-			}
+		// In certain situations such as unfinished uploads, deleting all
+		// tags in S3 or removing the _manifests folder manually, this
+		// error may be of type PathNotFound.
+		//
+		// In these cases we can continue marking other manifests safely.
+		if _, ok := err.(driver.PathNotFoundError); ok {
+			return nil
 		}
 
 		return err

From 3aa2a282f76b7d1f71d28d602cfb4f81b1388b94 Mon Sep 17 00:00:00 2001
From: Shawnpku
Date: Mon, 11 Feb 2019 15:07:36 +0800
Subject: [PATCH 194/276] support alicdn middleware

Signed-off-by: Shawnpku
---
 docs/configuration.md                         |  11 ++
 .../driver/middleware/alicdn/middleware.go    | 116 ++++++++++++++++++
 .../aliyungo/cdn/auth/random_uuid.go          |  97 +++++++++++++++
 .../aliyungo/cdn/auth/random_uuid_test.go     |  21 ++++
 .../denverdino/aliyungo/cdn/auth/sign_url.go  |  80 ++++++++++++
 .../aliyungo/cdn/auth/sign_url_test.go        |  53 ++++++++
 6 files changed, 378 insertions(+)
 create mode 100755 registry/storage/driver/middleware/alicdn/middleware.go
 create mode 100644 vendor/github.com/denverdino/aliyungo/cdn/auth/random_uuid.go
 create mode 100644 vendor/github.com/denverdino/aliyungo/cdn/auth/random_uuid_test.go
 create mode 100644 vendor/github.com/denverdino/aliyungo/cdn/auth/sign_url.go
 create mode 100644 vendor/github.com/denverdino/aliyungo/cdn/auth/sign_url_test.go

diff --git a/docs/configuration.md b/docs/configuration.md
index c26fa16d..10245bf1 100644
--- a/docs/configuration.md
+++ b/docs/configuration.md
@@ -715,6 +715,17 @@ Then value of ipfilteredby:
 `aws`: IP from AWS goes to S3 directly
 `awsregion`: IP from certain AWS regions goes to S3 directly, use together with `awsregion`
 
+### `alicdn`
+
+`alicdn` storage middleware allows the registry to serve layers via a content delivery network provided by Alibaba Cloud. Alicdn requires the OSS storage driver.
+
+| Parameter | Required | Description |
+|-----------|----------|-------------------------------------------------------|
+| `baseurl` | yes | The `SCHEME://HOST` at which Alicdn is served. |
+| `authtype` | yes | The URL authentication type for Alicdn, which should be `a`, `b` or `c`. |
+| `privatekey` | yes | The URL authentication key for Alicdn. |
+| `duration` | no | An integer and unit for the duration of the Alicdn session. Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, or `h`.|
+
 ### `redirect`
 
 You can use the `redirect` storage middleware to specify a custom URL to a
diff --git a/registry/storage/driver/middleware/alicdn/middleware.go b/registry/storage/driver/middleware/alicdn/middleware.go
new file mode 100755
index 00000000..828d88c2
--- /dev/null
+++ b/registry/storage/driver/middleware/alicdn/middleware.go
@@ -0,0 +1,116 @@
+package middleware
+
+import (
+	"fmt"
+	"net/url"
+	"strings"
+	"time"
+
+	"github.com/docker/distribution/context"
+	storagedriver "github.com/docker/distribution/registry/storage/driver"
+	storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware"
+
+	"github.com/denverdino/aliyungo/cdn/auth"
+)
+
+// aliCdnStorageMiddleware provides a simple implementation of a StorageDriver
+// middleware that constructs temporary signed AliCDN URLs from the
+// storagedriver layer URL, then issues HTTP Temporary Redirects to this
+// AliCDN content URL.
+type aliCdnStorageMiddleware struct {
+	storagedriver.StorageDriver
+	baseURL   string
+	urlSigner *auth.URLSigner
+	duration  time.Duration
+}
+
+var _ storagedriver.StorageDriver = &aliCdnStorageMiddleware{}
+
+// newAliCdnStorageMiddleware constructs and returns a new AliCDN
+// StorageDriver middleware implementation.
+// Required options: baseurl, authtype, privatekey
+// Optional options: duration
+func newAliCdnStorageMiddleware(storageDriver storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) {
+	// parse baseurl
+	base, ok := options["baseurl"]
+	if !ok {
+		return nil, fmt.Errorf("no baseurl provided")
+	}
+	baseURL, ok := base.(string)
+	if !ok {
+		return nil, fmt.Errorf("baseurl must be a string")
+	}
+	if !strings.Contains(baseURL, "://") {
+		baseURL = "https://" + baseURL
+	}
+	if _, err := url.Parse(baseURL); err != nil {
+		return nil, fmt.Errorf("invalid baseurl: %v", err)
+	}
+
+	// parse authtype
+	at, ok := options["authtype"]
+	if !ok {
+		return nil, fmt.Errorf("no authtype provided")
+	}
+	authType, ok := at.(string)
+	if !ok {
+		return nil, fmt.Errorf("authtype must be a string")
+	}
+	if authType != "a" && authType != "b" && authType != "c" {
+		return nil, fmt.Errorf("invalid authentication type")
+	}
+
+	// parse privatekey
+	pk, ok := options["privatekey"]
+	if !ok {
+		return nil, fmt.Errorf("no privatekey provided")
+	}
+	privateKey, ok := pk.(string)
+	if !ok {
+		return nil, fmt.Errorf("privatekey must be a string")
+	}
+
+	urlSigner := auth.NewURLSigner(authType, privateKey)
+
+	// parse duration
+	duration := 60 * time.Minute
+	d, ok := options["duration"]
+	if ok {
+		switch d := d.(type) {
+		case time.Duration:
+			duration = d
+		case string:
+			dur, err := time.ParseDuration(d)
+			if err != nil {
+				return nil, fmt.Errorf("invalid duration: %s", err)
+			}
+			duration = dur
+		}
+	}
+
+	return &aliCdnStorageMiddleware{
+		StorageDriver: storageDriver,
+		baseURL:       baseURL,
+		urlSigner:     urlSigner,
+		duration:      duration,
+	}, nil
+}
+
+// URLFor attempts to find a url which may be used to retrieve the file at the given path.
+// Returns an error if the file cannot be found.
+func (ac *aliCdnStorageMiddleware) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + + if ac.StorageDriver.Name() != "oss" { + context.GetLogger(ctx).Warn("the AliCdn middleware does not support this backend storage driver") + return ac.StorageDriver.URLFor(ctx, path, options) + } + acURL, err := ac.urlSigner.Sign(ac.baseURL+path, time.Now().Add(ac.duration)) + if err != nil { + return "", err + } + return acURL, nil +} + +// init registers the alicdn layerHandler backend. +func init() { + storagemiddleware.Register("alicdn", storagemiddleware.InitFunc(newAliCdnStorageMiddleware)) +} diff --git a/vendor/github.com/denverdino/aliyungo/cdn/auth/random_uuid.go b/vendor/github.com/denverdino/aliyungo/cdn/auth/random_uuid.go new file mode 100644 index 00000000..169a2b6c --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/cdn/auth/random_uuid.go @@ -0,0 +1,97 @@ +package auth + +import ( + "crypto/rand" + "fmt" + "io" + "os" + "syscall" + "time" +) + +const ( + // Bits is the number of bits in a UUID + Bits = 128 + + // Size is the number of bytes in a UUID + Size = Bits / 8 + + format = "%08x%04x%04x%04x%012x" +) + +var ( + // Loggerf can be used to override the default logging destination. Such + // log messages in this library should be logged at warning or higher. + Loggerf = func(format string, args ...interface{}) {} +) + +// UUID represents a UUID value. UUIDs can be compared and set to other values +// and accessed by byte. +type UUID [Size]byte + +// GenerateUUID creates a new, version 4 uuid. +func GenerateUUID() (u UUID) { + const ( + // ensures we backoff for less than 450ms total. Use the following to + // select new value, in units of 10ms: + // n*(n+1)/2 = d -> n^2 + n - 2d -> n = (sqrt(8d + 1) - 1)/2 + maxretries = 9 + backoff = time.Millisecond * 10 + ) + + var ( + totalBackoff time.Duration + count int + retries int + ) + + for { + // This should never block but the read may fail. Because of this, + // we just try to read the random number generator until we get + // something. This is a very rare condition but may happen. + b := time.Duration(retries) * backoff + time.Sleep(b) + totalBackoff += b + + n, err := io.ReadFull(rand.Reader, u[count:]) + if err != nil { + if retryOnError(err) && retries < maxretries { + count += n + retries++ + Loggerf("error generating version 4 uuid, retrying: %v", err) + continue + } + + // Any other errors represent a system problem. What did someone + // do to /dev/urandom? + panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err)) + } + + break + } + + u[6] = (u[6] & 0x0f) | 0x40 // set version byte + u[8] = (u[8] & 0x3f) | 0x80 // set high order byte 0b10{8,9,a,b} + + return u +} + +func (u UUID) String() string { + return fmt.Sprintf(format, u[:4], u[4:6], u[6:8], u[8:10], u[10:]) +} + +// retryOnError tries to detect whether or not retrying would be fruitful. +func retryOnError(err error) bool { + switch err := err.(type) { + case *os.PathError: + return retryOnError(err.Err) // unpack the target error + case syscall.Errno: + if err == syscall.EPERM { + // EPERM represents an entropy pool exhaustion, a condition under + // which we backoff and retry. 
+ return true + } + } + + return false +} diff --git a/vendor/github.com/denverdino/aliyungo/cdn/auth/random_uuid_test.go b/vendor/github.com/denverdino/aliyungo/cdn/auth/random_uuid_test.go new file mode 100644 index 00000000..d2cb1a4e --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/cdn/auth/random_uuid_test.go @@ -0,0 +1,21 @@ +package auth + +import ( + "testing" +) + +const iterations = 1000 + +func TestUUID4Generation(t *testing.T) { + for i := 0; i < iterations; i++ { + u := GenerateUUID() + + if u[6]&0xf0 != 0x40 { + t.Fatalf("version byte not correctly set: %v, %08b %08b", u, u[6], u[6]&0xf0) + } + + if u[8]&0xc0 != 0x80 { + t.Fatalf("top order 8th byte not correctly set: %v, %b", u, u[8]) + } + } +} diff --git a/vendor/github.com/denverdino/aliyungo/cdn/auth/sign_url.go b/vendor/github.com/denverdino/aliyungo/cdn/auth/sign_url.go new file mode 100644 index 00000000..211bd04f --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/cdn/auth/sign_url.go @@ -0,0 +1,80 @@ +package auth + +import ( + "crypto/md5" + "fmt" + "net/url" + "time" +) + +// An URLSigner provides URL signing utilities to sign URLs for Aliyun CDN +// resources. +// authentication document: https://help.aliyun.com/document_detail/85117.html +type URLSigner struct { + authType string + privKey string +} + +// NewURLSigner returns a new signer object. +func NewURLSigner(authType string, privKey string) *URLSigner { + return &URLSigner{ + authType: authType, + privKey: privKey, + } +} + +// Sign returns a signed aliyuncdn url based on authentication type +func (s URLSigner) Sign(uri string, expires time.Time) (string, error) { + r, err := url.Parse(uri) + if err != nil { + return "", fmt.Errorf("unable to parse url: %s", uri) + } + + switch s.authType { + case "a": + return aTypeSign(r, s.privKey, expires), nil + case "b": + return bTypeSign(r, s.privKey, expires), nil + case "c": + return cTypeSign(r, s.privKey, expires), nil + default: + return "", fmt.Errorf("invalid authentication type") + } +} + +// sign by A type authentication method. +// authentication document: https://help.aliyun.com/document_detail/85113.html +func aTypeSign(r *url.URL, privateKey string, expires time.Time) string { + //rand is a random uuid without "-" + rand := GenerateUUID().String() + // not use, "0" by default + uid := "0" + secret := fmt.Sprintf("%s-%d-%s-%s-%s", r.Path, expires.Unix(), rand, uid, privateKey) + hashValue := md5.Sum([]byte(secret)) + authKey := fmt.Sprintf("%d-%s-%s-%x", expires.Unix(), rand, uid, hashValue) + if r.RawQuery == "" { + return fmt.Sprintf("%s?auth_key=%s", r.String(), authKey) + } + return fmt.Sprintf("%s&auth_key=%s", r.String(), authKey) + +} + +// sign by B type authentication method. +// authentication document: https://help.aliyun.com/document_detail/85114.html +func bTypeSign(r *url.URL, privateKey string, expires time.Time) string { + formatExp := expires.Format("200601021504") + secret := privateKey + formatExp + r.Path + hashValue := md5.Sum([]byte(secret)) + signURL := fmt.Sprintf("%s://%s/%s/%x%s?%s", r.Scheme, r.Host, formatExp, hashValue, r.Path, r.RawQuery) + return signURL +} + +// sign by C type authentication method. 
+// authentication document: https://help.aliyun.com/document_detail/85115.html +func cTypeSign(r *url.URL, privateKey string, expires time.Time) string { + hexExp := fmt.Sprintf("%x", expires.Unix()) + secret := privateKey + r.Path + hexExp + hashValue := md5.Sum([]byte(secret)) + signURL := fmt.Sprintf("%s://%s/%x/%s%s?%s", r.Scheme, r.Host, hashValue, hexExp, r.Path, r.RawQuery) + return signURL +} diff --git a/vendor/github.com/denverdino/aliyungo/cdn/auth/sign_url_test.go b/vendor/github.com/denverdino/aliyungo/cdn/auth/sign_url_test.go new file mode 100644 index 00000000..46291d48 --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/cdn/auth/sign_url_test.go @@ -0,0 +1,53 @@ +package auth + +import ( + "crypto/md5" + "fmt" + "net/url" + "reflect" + "testing" + "time" +) + +var ( + testSignTime = time.Unix(1541064730, 0) + testPrivKey = "12345678" +) + +func assertEqual(t *testing.T, name string, x, y interface{}) { + if !reflect.DeepEqual(x, y) { + t.Errorf("%s: Not equal! Expected='%v', Actual='%v'\n", name, x, y) + t.FailNow() + } +} + +func TestAtypeAuth(t *testing.T) { + r, _ := url.Parse("https://example.com/a?foo=bar") + url := aTypeTest(r, testPrivKey, testSignTime) + assertEqual(t, "testTypeA", "https://example.com/a?foo=bar&auth_key=1541064730-0-0-f9dd5ed1e274ab4b1d5f5745344bf28b", url) +} + +func TestBtypeAuth(t *testing.T) { + signer := NewURLSigner("b", testPrivKey) + url, _ := signer.Sign("https://example.com/a?foo=bar", testSignTime) + assertEqual(t, "testTypeB", "https://example.com/201811011732/3a19d83a89ccb00a73212420791b0123/a?foo=bar", url) +} + +func TestCtypeAuth(t *testing.T) { + signer := NewURLSigner("c", testPrivKey) + url, _ := signer.Sign("https://example.com/a?foo=bar", testSignTime) + assertEqual(t, "testTypeC", "https://example.com/7d6b308ce87beb16d9dba32d741220f6/5bdac81a/a?foo=bar", url) +} + +func aTypeTest(r *url.URL, privateKey string, expires time.Time) string { + //rand equals "0" in test case + rand := "0" + uid := "0" + secret := fmt.Sprintf("%s-%d-%s-%s-%s", r.Path, expires.Unix(), rand, uid, privateKey) + hashValue := md5.Sum([]byte(secret)) + authKey := fmt.Sprintf("%d-%s-%s-%x", expires.Unix(), rand, uid, hashValue) + if r.RawQuery == "" { + return fmt.Sprintf("%s?auth_key=%s", r.String(), authKey) + } + return fmt.Sprintf("%s&auth_key=%s", r.String(), authKey) +} From ec6566c02b9c452d8e465ffa41c1c20117228917 Mon Sep 17 00:00:00 2001 From: Manish Tomar Date: Wed, 13 Feb 2019 08:49:37 -0800 Subject: [PATCH 195/276] Log authorized username This is useful to know which user pulled/pushed which repo. Signed-off-by: Manish Tomar --- registry/handlers/app.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/registry/handlers/app.go b/registry/handlers/app.go index 5980753b..22587087 100644 --- a/registry/handlers/app.go +++ b/registry/handlers/app.go @@ -862,7 +862,7 @@ func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Cont return err } - dcontext.GetLogger(ctx).Info("authorized request") + dcontext.GetLogger(ctx, auth.UserNameKey).Info("authorized request") // TODO(stevvooe): This pattern needs to be cleaned up a bit. One context // should be replaced by another, rather than replacing the context on a // mutable object. 
From 8b706168467e9352e0c567633c3c264a340de79d Mon Sep 17 00:00:00 2001 From: tifayuki Date: Tue, 13 Feb 2018 13:30:56 -0800 Subject: [PATCH 196/276] Add notification metrics It adds notification related prometheus metrics, including: - total count for events/success/failure/error - total count for notification per each status code - gauge of the pending notification queue Signed-off-by: tifayuki --- metrics/prometheus.go | 3 +++ notifications/metrics.go | 28 ++++++++++++++++++++++++++++ 2 files changed, 31 insertions(+) diff --git a/metrics/prometheus.go b/metrics/prometheus.go index b5a53214..91b32b23 100644 --- a/metrics/prometheus.go +++ b/metrics/prometheus.go @@ -10,4 +10,7 @@ const ( var ( // StorageNamespace is the prometheus namespace of blob/cache related operations StorageNamespace = metrics.NewNamespace(NamespacePrefix, "storage", nil) + + // NotificationsNamespace is the prometheus namespace of notification related metrics + NotificationsNamespace = metrics.NewNamespace(NamespacePrefix, "notifications", nil) ) diff --git a/notifications/metrics.go b/notifications/metrics.go index a20af168..69960e9c 100644 --- a/notifications/metrics.go +++ b/notifications/metrics.go @@ -5,6 +5,18 @@ import ( "fmt" "net/http" "sync" + + prometheus "github.com/docker/distribution/metrics" + "github.com/docker/go-metrics" +) + +var ( + // eventsCounter counts total events of incoming, success, failure, and errors + eventsCounter = prometheus.NotificationsNamespace.NewLabeledCounter("events", "The number of total events", "type") + // pendingGauge measures the pending queue size + pendingGauge = prometheus.NotificationsNamespace.NewGauge("pending", "The gauge of pending events in queue", metrics.Total) + // statusCounter counts the total notification call per each status code + statusCounter = prometheus.NotificationsNamespace.NewLabeledCounter("status", "The number of status code", "code") ) // EndpointMetrics track various actions taken by the endpoint, typically by @@ -61,6 +73,9 @@ func (emsl *endpointMetricsHTTPStatusListener) success(status int, events ...Eve defer emsl.safeMetrics.Unlock() emsl.Statuses[fmt.Sprintf("%d %s", status, http.StatusText(status))] += len(events) emsl.Successes += len(events) + + statusCounter.WithValues(fmt.Sprintf("%d %s", status, http.StatusText(status))).Inc(1) + eventsCounter.WithValues("Successes").Inc(1) } func (emsl *endpointMetricsHTTPStatusListener) failure(status int, events ...Event) { @@ -68,12 +83,17 @@ func (emsl *endpointMetricsHTTPStatusListener) failure(status int, events ...Eve defer emsl.safeMetrics.Unlock() emsl.Statuses[fmt.Sprintf("%d %s", status, http.StatusText(status))] += len(events) emsl.Failures += len(events) + + statusCounter.WithValues(fmt.Sprintf("%d %s", status, http.StatusText(status))).Inc(1) + eventsCounter.WithValues("Failures").Inc(1) } func (emsl *endpointMetricsHTTPStatusListener) err(err error, events ...Event) { emsl.safeMetrics.Lock() defer emsl.safeMetrics.Unlock() emsl.Errors += len(events) + + eventsCounter.WithValues("Errors").Inc(1) } // endpointMetricsEventQueueListener maintains the incoming events counter and @@ -87,12 +107,17 @@ func (eqc *endpointMetricsEventQueueListener) ingress(events ...Event) { defer eqc.Unlock() eqc.Events += len(events) eqc.Pending += len(events) + + eventsCounter.WithValues("Events").Inc() + pendingGauge.Inc(1) } func (eqc *endpointMetricsEventQueueListener) egress(events ...Event) { eqc.Lock() defer eqc.Unlock() eqc.Pending -= len(events) + + pendingGauge.Dec(1) } // endpoints is global 
registry of endpoints used to report metrics to expvar @@ -149,4 +174,7 @@ func init() { })) registry.(*expvar.Map).Set("notifications", ¬ifications) + + // register prometheus metrics + metrics.Register(prometheus.NotificationsNamespace) } From 76da6290b0b99d6d6ce635424426e7803edb4303 Mon Sep 17 00:00:00 2001 From: Honglin Feng Date: Thu, 11 Oct 2018 21:39:02 +0800 Subject: [PATCH 197/276] add label to the metrics Signed-off-by: Honglin Feng --- notifications/endpoint.go | 2 +- notifications/http_test.go | 2 +- notifications/metrics.go | 26 ++++++++++++++------------ notifications/sinks_test.go | 2 +- 4 files changed, 17 insertions(+), 15 deletions(-) diff --git a/notifications/endpoint.go b/notifications/endpoint.go index a8a52d0c..854f1dd6 100644 --- a/notifications/endpoint.go +++ b/notifications/endpoint.go @@ -58,7 +58,7 @@ func NewEndpoint(name, url string, config EndpointConfig) *Endpoint { endpoint.url = url endpoint.EndpointConfig = config endpoint.defaults() - endpoint.metrics = newSafeMetrics() + endpoint.metrics = newSafeMetrics(name) // Configures the inmemory queue, retry, http pipeline. endpoint.Sink = newHTTPSink( diff --git a/notifications/http_test.go b/notifications/http_test.go index de47f789..b7845cf9 100644 --- a/notifications/http_test.go +++ b/notifications/http_test.go @@ -63,7 +63,7 @@ func TestHTTPSink(t *testing.T) { }) server := httptest.NewTLSServer(serverHandler) - metrics := newSafeMetrics() + metrics := newSafeMetrics("") sink := newHTTPSink(server.URL, 0, nil, nil, &endpointMetricsHTTPStatusListener{safeMetrics: metrics}) diff --git a/notifications/metrics.go b/notifications/metrics.go index 69960e9c..4464edd8 100644 --- a/notifications/metrics.go +++ b/notifications/metrics.go @@ -12,11 +12,11 @@ import ( var ( // eventsCounter counts total events of incoming, success, failure, and errors - eventsCounter = prometheus.NotificationsNamespace.NewLabeledCounter("events", "The number of total events", "type") + eventsCounter = prometheus.NotificationsNamespace.NewLabeledCounter("events", "The number of total events", "type", "to") // pendingGauge measures the pending queue size - pendingGauge = prometheus.NotificationsNamespace.NewGauge("pending", "The gauge of pending events in queue", metrics.Total) + pendingGauge = prometheus.NotificationsNamespace.NewLabeledGauge("pending", "The gauge of pending events in queue", metrics.Total, "to") // statusCounter counts the total notification call per each status code - statusCounter = prometheus.NotificationsNamespace.NewLabeledCounter("status", "The number of status code", "code") + statusCounter = prometheus.NotificationsNamespace.NewLabeledCounter("status", "The number of status code", "code", "to") ) // EndpointMetrics track various actions taken by the endpoint, typically by @@ -34,14 +34,16 @@ type EndpointMetrics struct { // safeMetrics guards the metrics implementation with a lock and provides a // safe update function. type safeMetrics struct { + EndpointName string EndpointMetrics sync.Mutex // protects statuses map } // newSafeMetrics returns safeMetrics with map allocated. 
-func newSafeMetrics() *safeMetrics { +func newSafeMetrics(name string) *safeMetrics { var sm safeMetrics sm.Statuses = make(map[string]int) + sm.EndpointName = name return &sm } @@ -74,8 +76,8 @@ func (emsl *endpointMetricsHTTPStatusListener) success(status int, events ...Eve emsl.Statuses[fmt.Sprintf("%d %s", status, http.StatusText(status))] += len(events) emsl.Successes += len(events) - statusCounter.WithValues(fmt.Sprintf("%d %s", status, http.StatusText(status))).Inc(1) - eventsCounter.WithValues("Successes").Inc(1) + statusCounter.WithValues(fmt.Sprintf("%d %s", status, http.StatusText(status)), emsl.EndpointName).Inc(1) + eventsCounter.WithValues("Successes", emsl.EndpointName).Inc(1) } func (emsl *endpointMetricsHTTPStatusListener) failure(status int, events ...Event) { @@ -84,8 +86,8 @@ func (emsl *endpointMetricsHTTPStatusListener) failure(status int, events ...Eve emsl.Statuses[fmt.Sprintf("%d %s", status, http.StatusText(status))] += len(events) emsl.Failures += len(events) - statusCounter.WithValues(fmt.Sprintf("%d %s", status, http.StatusText(status))).Inc(1) - eventsCounter.WithValues("Failures").Inc(1) + statusCounter.WithValues(fmt.Sprintf("%d %s", status, http.StatusText(status)), emsl.EndpointName).Inc(1) + eventsCounter.WithValues("Failures", emsl.EndpointName).Inc(1) } func (emsl *endpointMetricsHTTPStatusListener) err(err error, events ...Event) { @@ -93,7 +95,7 @@ func (emsl *endpointMetricsHTTPStatusListener) err(err error, events ...Event) { defer emsl.safeMetrics.Unlock() emsl.Errors += len(events) - eventsCounter.WithValues("Errors").Inc(1) + eventsCounter.WithValues("Errors", emsl.EndpointName).Inc(1) } // endpointMetricsEventQueueListener maintains the incoming events counter and @@ -108,8 +110,8 @@ func (eqc *endpointMetricsEventQueueListener) ingress(events ...Event) { eqc.Events += len(events) eqc.Pending += len(events) - eventsCounter.WithValues("Events").Inc() - pendingGauge.Inc(1) + eventsCounter.WithValues("Events", eqc.EndpointName).Inc() + pendingGauge.WithValues(eqc.EndpointName).Inc(1) } func (eqc *endpointMetricsEventQueueListener) egress(events ...Event) { @@ -117,7 +119,7 @@ func (eqc *endpointMetricsEventQueueListener) egress(events ...Event) { defer eqc.Unlock() eqc.Pending -= len(events) - pendingGauge.Dec(1) + pendingGauge.WithValues(eqc.EndpointName).Dec(1) } // endpoints is global registry of endpoints used to report metrics to expvar diff --git a/notifications/sinks_test.go b/notifications/sinks_test.go index 06f88b2c..4a69486b 100644 --- a/notifications/sinks_test.go +++ b/notifications/sinks_test.go @@ -66,7 +66,7 @@ func TestBroadcaster(t *testing.T) { func TestEventQueue(t *testing.T) { const nevents = 1000 var ts testSink - metrics := newSafeMetrics() + metrics := newSafeMetrics("") eq := newEventQueue( // delayed sync simulates destination slower than channel comms &delayedSink{ From 228bafca0b419d16e50704d25d13b7d6a07c3925 Mon Sep 17 00:00:00 2001 From: Honglin Feng Date: Mon, 15 Oct 2018 19:50:38 +0800 Subject: [PATCH 198/276] run go fmt Signed-off-by: Honglin Feng --- registry/storage/driver/s3-aws/s3.go | 10 +++++----- registry/storage/linkedblobstore.go | 16 ++++++++-------- registry/storage/linkedblobstore_test.go | 4 ++-- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/registry/storage/driver/s3-aws/s3.go b/registry/storage/driver/s3-aws/s3.go index 126a07f6..124619ce 100644 --- a/registry/storage/driver/s3-aws/s3.go +++ b/registry/storage/driver/s3-aws/s3.go @@ -476,11 +476,11 @@ func New(params 
DriverParameters) (*Driver, error) { // } d := &driver{ - S3: s3obj, - Bucket: params.Bucket, - ChunkSize: params.ChunkSize, - Encrypt: params.Encrypt, - KeyID: params.KeyID, + S3: s3obj, + Bucket: params.Bucket, + ChunkSize: params.ChunkSize, + Encrypt: params.Encrypt, + KeyID: params.KeyID, MultipartCopyChunkSize: params.MultipartCopyChunkSize, MultipartCopyMaxConcurrency: params.MultipartCopyMaxConcurrency, MultipartCopyThresholdSize: params.MultipartCopyThresholdSize, diff --git a/registry/storage/linkedblobstore.go b/registry/storage/linkedblobstore.go index de591c8a..3fb1da26 100644 --- a/registry/storage/linkedblobstore.go +++ b/registry/storage/linkedblobstore.go @@ -312,14 +312,14 @@ func (lbs *linkedBlobStore) newBlobUpload(ctx context.Context, uuid, path string } bw := &blobWriter{ - ctx: ctx, - blobStore: lbs, - id: uuid, - startedAt: startedAt, - digester: digest.Canonical.Digester(), - fileWriter: fw, - driver: lbs.driver, - path: path, + ctx: ctx, + blobStore: lbs, + id: uuid, + startedAt: startedAt, + digester: digest.Canonical.Digester(), + fileWriter: fw, + driver: lbs.driver, + path: path, resumableDigestEnabled: lbs.resumableDigestEnabled, } diff --git a/registry/storage/linkedblobstore_test.go b/registry/storage/linkedblobstore_test.go index 7682b45c..43774d85 100644 --- a/registry/storage/linkedblobstore_test.go +++ b/registry/storage/linkedblobstore_test.go @@ -161,8 +161,8 @@ type mockBlobDescriptorServiceFactory struct { func (f *mockBlobDescriptorServiceFactory) BlobAccessController(svc distribution.BlobDescriptorService) distribution.BlobDescriptorService { return &mockBlobDescriptorService{ BlobDescriptorService: svc, - t: f.t, - stats: f.stats, + t: f.t, + stats: f.stats, } } From 09a63caa37ca9ad7b16b99d4806b82482ec796b5 Mon Sep 17 00:00:00 2001 From: Honglin Feng Date: Mon, 15 Oct 2018 20:18:36 +0800 Subject: [PATCH 199/276] run go fmt and goimports Signed-off-by: Honglin Feng --- registry/storage/driver/s3-aws/s3.go | 10 +++++----- registry/storage/linkedblobstore.go | 16 ++++++++-------- registry/storage/linkedblobstore_test.go | 4 ++-- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/registry/storage/driver/s3-aws/s3.go b/registry/storage/driver/s3-aws/s3.go index 124619ce..126a07f6 100644 --- a/registry/storage/driver/s3-aws/s3.go +++ b/registry/storage/driver/s3-aws/s3.go @@ -476,11 +476,11 @@ func New(params DriverParameters) (*Driver, error) { // } d := &driver{ - S3: s3obj, - Bucket: params.Bucket, - ChunkSize: params.ChunkSize, - Encrypt: params.Encrypt, - KeyID: params.KeyID, + S3: s3obj, + Bucket: params.Bucket, + ChunkSize: params.ChunkSize, + Encrypt: params.Encrypt, + KeyID: params.KeyID, MultipartCopyChunkSize: params.MultipartCopyChunkSize, MultipartCopyMaxConcurrency: params.MultipartCopyMaxConcurrency, MultipartCopyThresholdSize: params.MultipartCopyThresholdSize, diff --git a/registry/storage/linkedblobstore.go b/registry/storage/linkedblobstore.go index 3fb1da26..de591c8a 100644 --- a/registry/storage/linkedblobstore.go +++ b/registry/storage/linkedblobstore.go @@ -312,14 +312,14 @@ func (lbs *linkedBlobStore) newBlobUpload(ctx context.Context, uuid, path string } bw := &blobWriter{ - ctx: ctx, - blobStore: lbs, - id: uuid, - startedAt: startedAt, - digester: digest.Canonical.Digester(), - fileWriter: fw, - driver: lbs.driver, - path: path, + ctx: ctx, + blobStore: lbs, + id: uuid, + startedAt: startedAt, + digester: digest.Canonical.Digester(), + fileWriter: fw, + driver: lbs.driver, + path: path, resumableDigestEnabled: 
lbs.resumableDigestEnabled, } diff --git a/registry/storage/linkedblobstore_test.go b/registry/storage/linkedblobstore_test.go index 43774d85..7682b45c 100644 --- a/registry/storage/linkedblobstore_test.go +++ b/registry/storage/linkedblobstore_test.go @@ -161,8 +161,8 @@ type mockBlobDescriptorServiceFactory struct { func (f *mockBlobDescriptorServiceFactory) BlobAccessController(svc distribution.BlobDescriptorService) distribution.BlobDescriptorService { return &mockBlobDescriptorService{ BlobDescriptorService: svc, - t: f.t, - stats: f.stats, + t: f.t, + stats: f.stats, } } From d5a615b8c9a469c88bebec7294d0ae0c9931773a Mon Sep 17 00:00:00 2001 From: Honglin Feng Date: Fri, 15 Feb 2019 21:55:08 +0800 Subject: [PATCH 200/276] update the event number Signed-off-by: Honglin Feng --- notifications/metrics.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/notifications/metrics.go b/notifications/metrics.go index 4464edd8..3b43c51f 100644 --- a/notifications/metrics.go +++ b/notifications/metrics.go @@ -77,7 +77,7 @@ func (emsl *endpointMetricsHTTPStatusListener) success(status int, events ...Eve emsl.Successes += len(events) statusCounter.WithValues(fmt.Sprintf("%d %s", status, http.StatusText(status)), emsl.EndpointName).Inc(1) - eventsCounter.WithValues("Successes", emsl.EndpointName).Inc(1) + eventsCounter.WithValues("Successes", emsl.EndpointName).Inc(float64(len(events))) } func (emsl *endpointMetricsHTTPStatusListener) failure(status int, events ...Event) { @@ -87,7 +87,7 @@ func (emsl *endpointMetricsHTTPStatusListener) failure(status int, events ...Eve emsl.Failures += len(events) statusCounter.WithValues(fmt.Sprintf("%d %s", status, http.StatusText(status)), emsl.EndpointName).Inc(1) - eventsCounter.WithValues("Failures", emsl.EndpointName).Inc(1) + eventsCounter.WithValues("Failures", emsl.EndpointName).Inc(float64(len(events))) } func (emsl *endpointMetricsHTTPStatusListener) err(err error, events ...Event) { @@ -95,7 +95,7 @@ func (emsl *endpointMetricsHTTPStatusListener) err(err error, events ...Event) { defer emsl.safeMetrics.Unlock() emsl.Errors += len(events) - eventsCounter.WithValues("Errors", emsl.EndpointName).Inc(1) + eventsCounter.WithValues("Errors", emsl.EndpointName).Inc(float64(len(events))) } // endpointMetricsEventQueueListener maintains the incoming events counter and @@ -111,7 +111,7 @@ func (eqc *endpointMetricsEventQueueListener) ingress(events ...Event) { eqc.Pending += len(events) eventsCounter.WithValues("Events", eqc.EndpointName).Inc() - pendingGauge.WithValues(eqc.EndpointName).Inc(1) + pendingGauge.WithValues(eqc.EndpointName).Inc(float64(len(events))) } func (eqc *endpointMetricsEventQueueListener) egress(events ...Event) { From 92a6436714be6438f1cc8e4490a0c638868881e9 Mon Sep 17 00:00:00 2001 From: Honglin Feng Date: Sun, 17 Feb 2019 12:46:01 +0800 Subject: [PATCH 201/276] rename the metrics label Signed-off-by: Honglin Feng --- notifications/metrics.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/notifications/metrics.go b/notifications/metrics.go index 3b43c51f..657b2aa0 100644 --- a/notifications/metrics.go +++ b/notifications/metrics.go @@ -12,11 +12,11 @@ import ( var ( // eventsCounter counts total events of incoming, success, failure, and errors - eventsCounter = prometheus.NotificationsNamespace.NewLabeledCounter("events", "The number of total events", "type", "to") + eventsCounter = prometheus.NotificationsNamespace.NewLabeledCounter("events", "The number of total events", "type", 
"endpoint") // pendingGauge measures the pending queue size - pendingGauge = prometheus.NotificationsNamespace.NewLabeledGauge("pending", "The gauge of pending events in queue", metrics.Total, "to") + pendingGauge = prometheus.NotificationsNamespace.NewLabeledGauge("pending", "The gauge of pending events in queue", metrics.Total, "endpoint") // statusCounter counts the total notification call per each status code - statusCounter = prometheus.NotificationsNamespace.NewLabeledCounter("status", "The number of status code", "code", "to") + statusCounter = prometheus.NotificationsNamespace.NewLabeledCounter("status", "The number of status code", "code", "endpoint") ) // EndpointMetrics track various actions taken by the endpoint, typically by From bbc9885aa216988e7a3332f9da197933de2849e3 Mon Sep 17 00:00:00 2001 From: Shawnpku Date: Mon, 11 Feb 2019 19:20:47 +0800 Subject: [PATCH 202/276] fix func name Signed-off-by: Shawnpku --- .../driver/middleware/alicdn/middleware.go | 21 +++++++++---------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/registry/storage/driver/middleware/alicdn/middleware.go b/registry/storage/driver/middleware/alicdn/middleware.go index 828d88c2..1609a9f6 100755 --- a/registry/storage/driver/middleware/alicdn/middleware.go +++ b/registry/storage/driver/middleware/alicdn/middleware.go @@ -13,23 +13,23 @@ import ( "github.com/denverdino/aliyungo/cdn/auth" ) -// aliCdnStorageMiddleware provides a simple implementation of layerHandler that +// aliCDNStorageMiddleware provides a simple implementation of layerHandler that // constructs temporary signed AliCDN URLs from the storagedriver layer URL, // then issues HTTP Temporary Redirects to this AliCDN content URL. -type aliCdnStorageMiddleware struct { +type aliCDNStorageMiddleware struct { storagedriver.StorageDriver baseURL string urlSigner *auth.URLSigner duration time.Duration } -var _ storagedriver.StorageDriver = &aliCdnStorageMiddleware{} +var _ storagedriver.StorageDriver = &aliCDNStorageMiddleware{} -// newAliCdnLayerHandler constructs and returns a new AliCDN -// LayerHandler implementation. +// newAliCDNStorageMiddleware constructs and returns a new AliCDN +// layerHandler implementation. // Required options: baseurl, authtype, privatekey // Optional options: duration -func newAliCdnStorageMiddleware(storageDriver storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) { +func newAliCDNStorageMiddleware(storageDriver storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) { // parse baseurl base, ok := options["baseurl"] if !ok { @@ -87,7 +87,7 @@ func newAliCdnStorageMiddleware(storageDriver storagedriver.StorageDriver, optio } } - return &aliCdnStorageMiddleware{ + return &aliCDNStorageMiddleware{ StorageDriver: storageDriver, baseURL: baseURL, urlSigner: urlSigner, @@ -96,11 +96,10 @@ func newAliCdnStorageMiddleware(storageDriver storagedriver.StorageDriver, optio } // URLFor attempts to find a url which may be used to retrieve the file at the given path. -// Returns an error if the file cannot be found. 
-func (ac *aliCdnStorageMiddleware) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { +func (ac *aliCDNStorageMiddleware) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { if ac.StorageDriver.Name() != "oss" { - context.GetLogger(ctx).Warn("the AliCdn middleware does not support this backend storage driver") + context.GetLogger(ctx).Warn("the AliCDN middleware does not support this backend storage driver") return ac.StorageDriver.URLFor(ctx, path, options) } acURL, err := ac.urlSigner.Sign(ac.baseURL+path, time.Now().Add(ac.duration)) @@ -112,5 +111,5 @@ func (ac *aliCdnStorageMiddleware) URLFor(ctx context.Context, path string, opti // init registers the alicdn layerHandler backend. func init() { - storagemiddleware.Register("alicdn", storagemiddleware.InitFunc(newAliCdnStorageMiddleware)) + storagemiddleware.Register("alicdn", storagemiddleware.InitFunc(newAliCDNStorageMiddleware)) } From a683c7c235e12a0dd0f03b64f8c41f06e1bd23bc Mon Sep 17 00:00:00 2001 From: Yu Wang Date: Thu, 21 Feb 2019 14:07:57 -0800 Subject: [PATCH 203/276] Fixes #2835 Process Accept header MIME types in case-insensitive way Use mime.ParseMediaType to parse the media types in Accept header in manifest request. Ignore the failed ones. Signed-off-by: Yu Wang --- registry/handlers/manifests.go | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/registry/handlers/manifests.go b/registry/handlers/manifests.go index 8da3f7af..6f2f5178 100644 --- a/registry/handlers/manifests.go +++ b/registry/handlers/manifests.go @@ -3,6 +3,7 @@ package handlers import ( "bytes" "fmt" + "mime" "net/http" "strings" @@ -97,14 +98,10 @@ func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request) // we need to split each header value on "," to get the full list of "Accept" values (per RFC 2616) // https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1 for _, mediaType := range strings.Split(acceptHeader, ",") { - // remove "; q=..." 
if present - if i := strings.Index(mediaType, ";"); i >= 0 { - mediaType = mediaType[:i] + if mediaType, _, err = mime.ParseMediaType(mediaType); err != nil { + continue } - // it's common (but not required) for Accept values to be space separated ("a/b, c/d, e/f") - mediaType = strings.TrimSpace(mediaType) - if mediaType == schema2.MediaTypeManifest { supports[manifestSchema2] = true } From f9a05061916d095e95566d23e968608ee0576446 Mon Sep 17 00:00:00 2001 From: Vishesh Jindal Date: Wed, 30 Jan 2019 18:35:07 +0530 Subject: [PATCH 204/276] Bugfix: Make ipfilteredby not required Signed-off-by: Vishesh Jindal --- .../middleware/cloudfront/middleware.go | 42 +++++++++++-------- 1 file changed, 24 insertions(+), 18 deletions(-) diff --git a/registry/storage/driver/middleware/cloudfront/middleware.go b/registry/storage/driver/middleware/cloudfront/middleware.go index 83a36a72..bd9031a9 100644 --- a/registry/storage/driver/middleware/cloudfront/middleware.go +++ b/registry/storage/driver/middleware/cloudfront/middleware.go @@ -138,27 +138,33 @@ func newCloudFrontStorageMiddleware(storageDriver storagedriver.StorageDriver, o // parse ipfilteredby var awsIPs *awsIPs - if ipFilteredBy := options["ipfilteredby"].(string); ok { - switch strings.ToLower(strings.TrimSpace(ipFilteredBy)) { - case "", "none": - awsIPs = nil - case "aws": - newAWSIPs(ipRangesURL, updateFrequency, nil) - case "awsregion": - var awsRegion []string - if regions, ok := options["awsregion"].(string); ok { - for _, awsRegions := range strings.Split(regions, ",") { - awsRegion = append(awsRegion, strings.ToLower(strings.TrimSpace(awsRegions))) + if i, ok := options["ipfilteredby"]; ok { + if ipFilteredBy, ok := i.(string); ok { + switch strings.ToLower(strings.TrimSpace(ipFilteredBy)) { + case "", "none": + awsIPs = nil + case "aws": + awsIPs = newAWSIPs(ipRangesURL, updateFrequency, nil) + case "awsregion": + var awsRegion []string + if i, ok := options["awsregion"]; ok { + if regions, ok := i.(string); ok { + for _, awsRegions := range strings.Split(regions, ",") { + awsRegion = append(awsRegion, strings.ToLower(strings.TrimSpace(awsRegions))) + } + awsIPs = newAWSIPs(ipRangesURL, updateFrequency, awsRegion) + } else { + return nil, fmt.Errorf("awsRegion must be a comma separated string of valid aws regions") + } + } else { + return nil, fmt.Errorf("awsRegion is not defined") } - awsIPs = newAWSIPs(ipRangesURL, updateFrequency, awsRegion) - } else { - return nil, fmt.Errorf("awsRegion must be a comma separated string of valid aws regions") + default: + return nil, fmt.Errorf("ipfilteredby only allows a string the following value: none|aws|awsregion") } - default: - return nil, fmt.Errorf("ipfilteredby only allows a string the following value: none|aws|awsregion") + } else { + return nil, fmt.Errorf("ipfilteredby only allows a string with the following value: none|aws|awsregion") } - } else { - return nil, fmt.Errorf("ipfilteredby only allows a string with the following value: none|aws|awsregion") } return &cloudFrontStorageMiddleware{ From e1e72e9563743afc1649b23a2f651a7c3caaf369 Mon Sep 17 00:00:00 2001 From: Vishesh Jindal Date: Wed, 30 Jan 2019 18:36:59 +0530 Subject: [PATCH 205/276] Fix cloudfront documentation formatting Signed-off-by: Vishesh Jindal --- docs/configuration.md | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/docs/configuration.md b/docs/configuration.md index c26fa16d..e87dea70 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -706,14 +706,19 @@ 
interpretation of the options. | `privatekey` | yes | The private key for Cloudfront, provided by AWS. | | `keypairid` | yes | The key pair ID provided by AWS. | | `duration` | no | An integer and unit for the duration of the Cloudfront session. Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, or `h`. For example, `3000s` is valid, but `3000 s` is not. If you do not specify a `duration` or you specify an integer without a time unit, the duration defaults to `20m` (20 minutes).| -|`ipfilteredby`|no | A string with the following value `none|aws|awsregion`. | +|`ipfilteredby`|no | A string with the following value `none`, `aws` or `awsregion`. | |`awsregion`|no | A comma separated string of AWS regions, only available when `ipfilteredby` is `awsregion`. For example, `us-east-1, us-west-2`| |`updatefrenquency`|no | The frequency to update AWS IP regions, default: `12h`| |`iprangesurl`|no | The URL contains the AWS IP ranges information, default: `https://ip-ranges.amazonaws.com/ip-ranges.json`| -Then value of ipfilteredby: -`none`: default, do not filter by IP -`aws`: IP from AWS goes to S3 directly -`awsregion`: IP from certain AWS regions goes to S3 directly, use together with `awsregion` + + +Value of `ipfilteredby` can be: + +| Value | Description | +|-------------|------------------------------------| +| `none` | default, do not filter by IP | +| `aws` | IP from AWS goes to S3 directly | +| `awsregion` | IP from certain AWS regions goes to S3 directly, use together with `awsregion`. | ### `redirect` From 6e10631d9c8b28038468c9759df2cde15fc4b674 Mon Sep 17 00:00:00 2001 From: Shawnpku Date: Mon, 4 Mar 2019 14:53:48 +0800 Subject: [PATCH 206/276] fix default cdn auth duration Signed-off-by: Shawnpku --- .../driver/middleware/alicdn/middleware.go | 2 +- .../aliyungo/cdn/auth/random_uuid.go | 97 ------------------- .../aliyungo/cdn/auth/random_uuid_test.go | 21 ---- .../denverdino/aliyungo/cdn/auth/sign_url.go | 80 --------------- .../aliyungo/cdn/auth/sign_url_test.go | 53 ---------- 5 files changed, 1 insertion(+), 252 deletions(-) delete mode 100644 vendor/github.com/denverdino/aliyungo/cdn/auth/random_uuid.go delete mode 100644 vendor/github.com/denverdino/aliyungo/cdn/auth/random_uuid_test.go delete mode 100644 vendor/github.com/denverdino/aliyungo/cdn/auth/sign_url.go delete mode 100644 vendor/github.com/denverdino/aliyungo/cdn/auth/sign_url_test.go diff --git a/registry/storage/driver/middleware/alicdn/middleware.go b/registry/storage/driver/middleware/alicdn/middleware.go index 1609a9f6..7c5fc732 100755 --- a/registry/storage/driver/middleware/alicdn/middleware.go +++ b/registry/storage/driver/middleware/alicdn/middleware.go @@ -72,7 +72,7 @@ func newAliCDNStorageMiddleware(storageDriver storagedriver.StorageDriver, optio urlSigner := auth.NewURLSigner(authType, privateKey) // parse duration - duration := 60 * time.Minute + duration := 20 * time.Minute d, ok := options["duration"] if ok { switch d := d.(type) { diff --git a/vendor/github.com/denverdino/aliyungo/cdn/auth/random_uuid.go b/vendor/github.com/denverdino/aliyungo/cdn/auth/random_uuid.go deleted file mode 100644 index 169a2b6c..00000000 --- a/vendor/github.com/denverdino/aliyungo/cdn/auth/random_uuid.go +++ /dev/null @@ -1,97 +0,0 @@ -package auth - -import ( - "crypto/rand" - "fmt" - "io" - "os" - "syscall" - "time" -) - -const ( - // Bits is the number of bits in a UUID - Bits = 128 - - // Size is the number of bytes in a UUID - Size = Bits / 8 - - format = "%08x%04x%04x%04x%012x" -) - -var ( - // Loggerf can be 
used to override the default logging destination. Such - // log messages in this library should be logged at warning or higher. - Loggerf = func(format string, args ...interface{}) {} -) - -// UUID represents a UUID value. UUIDs can be compared and set to other values -// and accessed by byte. -type UUID [Size]byte - -// GenerateUUID creates a new, version 4 uuid. -func GenerateUUID() (u UUID) { - const ( - // ensures we backoff for less than 450ms total. Use the following to - // select new value, in units of 10ms: - // n*(n+1)/2 = d -> n^2 + n - 2d -> n = (sqrt(8d + 1) - 1)/2 - maxretries = 9 - backoff = time.Millisecond * 10 - ) - - var ( - totalBackoff time.Duration - count int - retries int - ) - - for { - // This should never block but the read may fail. Because of this, - // we just try to read the random number generator until we get - // something. This is a very rare condition but may happen. - b := time.Duration(retries) * backoff - time.Sleep(b) - totalBackoff += b - - n, err := io.ReadFull(rand.Reader, u[count:]) - if err != nil { - if retryOnError(err) && retries < maxretries { - count += n - retries++ - Loggerf("error generating version 4 uuid, retrying: %v", err) - continue - } - - // Any other errors represent a system problem. What did someone - // do to /dev/urandom? - panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err)) - } - - break - } - - u[6] = (u[6] & 0x0f) | 0x40 // set version byte - u[8] = (u[8] & 0x3f) | 0x80 // set high order byte 0b10{8,9,a,b} - - return u -} - -func (u UUID) String() string { - return fmt.Sprintf(format, u[:4], u[4:6], u[6:8], u[8:10], u[10:]) -} - -// retryOnError tries to detect whether or not retrying would be fruitful. -func retryOnError(err error) bool { - switch err := err.(type) { - case *os.PathError: - return retryOnError(err.Err) // unpack the target error - case syscall.Errno: - if err == syscall.EPERM { - // EPERM represents an entropy pool exhaustion, a condition under - // which we backoff and retry. - return true - } - } - - return false -} diff --git a/vendor/github.com/denverdino/aliyungo/cdn/auth/random_uuid_test.go b/vendor/github.com/denverdino/aliyungo/cdn/auth/random_uuid_test.go deleted file mode 100644 index d2cb1a4e..00000000 --- a/vendor/github.com/denverdino/aliyungo/cdn/auth/random_uuid_test.go +++ /dev/null @@ -1,21 +0,0 @@ -package auth - -import ( - "testing" -) - -const iterations = 1000 - -func TestUUID4Generation(t *testing.T) { - for i := 0; i < iterations; i++ { - u := GenerateUUID() - - if u[6]&0xf0 != 0x40 { - t.Fatalf("version byte not correctly set: %v, %08b %08b", u, u[6], u[6]&0xf0) - } - - if u[8]&0xc0 != 0x80 { - t.Fatalf("top order 8th byte not correctly set: %v, %b", u, u[8]) - } - } -} diff --git a/vendor/github.com/denverdino/aliyungo/cdn/auth/sign_url.go b/vendor/github.com/denverdino/aliyungo/cdn/auth/sign_url.go deleted file mode 100644 index 211bd04f..00000000 --- a/vendor/github.com/denverdino/aliyungo/cdn/auth/sign_url.go +++ /dev/null @@ -1,80 +0,0 @@ -package auth - -import ( - "crypto/md5" - "fmt" - "net/url" - "time" -) - -// An URLSigner provides URL signing utilities to sign URLs for Aliyun CDN -// resources. -// authentication document: https://help.aliyun.com/document_detail/85117.html -type URLSigner struct { - authType string - privKey string -} - -// NewURLSigner returns a new signer object. 
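[The signer being deleted here, and restored two patches later, implements three Alibaba CDN URL-auth schemes. As a reference for how the "b" scheme works end to end, here is a self-contained sketch that reproduces the vector from the deleted sign_url_test.go, assuming the UTC+8 zone in which that expected value was computed:]

```
package main

import (
	"crypto/md5"
	"fmt"
	"net/url"
	"time"
)

// bTypeSign mirrors the vendored helper: md5(privateKey + timestamp + path),
// with the timestamp and digest spliced into the URL path.
func bTypeSign(r *url.URL, privateKey string, expires time.Time) string {
	formatExp := expires.Format("200601021504")
	secret := privateKey + formatExp + r.Path
	hashValue := md5.Sum([]byte(secret))
	return fmt.Sprintf("%s://%s/%s/%x%s?%s", r.Scheme, r.Host, formatExp, hashValue, r.Path, r.RawQuery)
}

func main() {
	u, _ := url.Parse("https://example.com/a?foo=bar")
	// The test vector was generated in UTC+8; pin the zone so the
	// formatted timestamp matches.
	cst := time.FixedZone("CST", 8*60*60)
	exp := time.Unix(1541064730, 0).In(cst)
	fmt.Println(bTypeSign(u, "12345678", exp))
	// https://example.com/201811011732/3a19d83a89ccb00a73212420791b0123/a?foo=bar
}
```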
-func NewURLSigner(authType string, privKey string) *URLSigner { - return &URLSigner{ - authType: authType, - privKey: privKey, - } -} - -// Sign returns a signed aliyuncdn url based on authentication type -func (s URLSigner) Sign(uri string, expires time.Time) (string, error) { - r, err := url.Parse(uri) - if err != nil { - return "", fmt.Errorf("unable to parse url: %s", uri) - } - - switch s.authType { - case "a": - return aTypeSign(r, s.privKey, expires), nil - case "b": - return bTypeSign(r, s.privKey, expires), nil - case "c": - return cTypeSign(r, s.privKey, expires), nil - default: - return "", fmt.Errorf("invalid authentication type") - } -} - -// sign by A type authentication method. -// authentication document: https://help.aliyun.com/document_detail/85113.html -func aTypeSign(r *url.URL, privateKey string, expires time.Time) string { - //rand is a random uuid without "-" - rand := GenerateUUID().String() - // not use, "0" by default - uid := "0" - secret := fmt.Sprintf("%s-%d-%s-%s-%s", r.Path, expires.Unix(), rand, uid, privateKey) - hashValue := md5.Sum([]byte(secret)) - authKey := fmt.Sprintf("%d-%s-%s-%x", expires.Unix(), rand, uid, hashValue) - if r.RawQuery == "" { - return fmt.Sprintf("%s?auth_key=%s", r.String(), authKey) - } - return fmt.Sprintf("%s&auth_key=%s", r.String(), authKey) - -} - -// sign by B type authentication method. -// authentication document: https://help.aliyun.com/document_detail/85114.html -func bTypeSign(r *url.URL, privateKey string, expires time.Time) string { - formatExp := expires.Format("200601021504") - secret := privateKey + formatExp + r.Path - hashValue := md5.Sum([]byte(secret)) - signURL := fmt.Sprintf("%s://%s/%s/%x%s?%s", r.Scheme, r.Host, formatExp, hashValue, r.Path, r.RawQuery) - return signURL -} - -// sign by C type authentication method. -// authentication document: https://help.aliyun.com/document_detail/85115.html -func cTypeSign(r *url.URL, privateKey string, expires time.Time) string { - hexExp := fmt.Sprintf("%x", expires.Unix()) - secret := privateKey + r.Path + hexExp - hashValue := md5.Sum([]byte(secret)) - signURL := fmt.Sprintf("%s://%s/%x/%s%s?%s", r.Scheme, r.Host, hashValue, hexExp, r.Path, r.RawQuery) - return signURL -} diff --git a/vendor/github.com/denverdino/aliyungo/cdn/auth/sign_url_test.go b/vendor/github.com/denverdino/aliyungo/cdn/auth/sign_url_test.go deleted file mode 100644 index 46291d48..00000000 --- a/vendor/github.com/denverdino/aliyungo/cdn/auth/sign_url_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package auth - -import ( - "crypto/md5" - "fmt" - "net/url" - "reflect" - "testing" - "time" -) - -var ( - testSignTime = time.Unix(1541064730, 0) - testPrivKey = "12345678" -) - -func assertEqual(t *testing.T, name string, x, y interface{}) { - if !reflect.DeepEqual(x, y) { - t.Errorf("%s: Not equal! 
Expected='%v', Actual='%v'\n", name, x, y) - t.FailNow() - } -} - -func TestAtypeAuth(t *testing.T) { - r, _ := url.Parse("https://example.com/a?foo=bar") - url := aTypeTest(r, testPrivKey, testSignTime) - assertEqual(t, "testTypeA", "https://example.com/a?foo=bar&auth_key=1541064730-0-0-f9dd5ed1e274ab4b1d5f5745344bf28b", url) -} - -func TestBtypeAuth(t *testing.T) { - signer := NewURLSigner("b", testPrivKey) - url, _ := signer.Sign("https://example.com/a?foo=bar", testSignTime) - assertEqual(t, "testTypeB", "https://example.com/201811011732/3a19d83a89ccb00a73212420791b0123/a?foo=bar", url) -} - -func TestCtypeAuth(t *testing.T) { - signer := NewURLSigner("c", testPrivKey) - url, _ := signer.Sign("https://example.com/a?foo=bar", testSignTime) - assertEqual(t, "testTypeC", "https://example.com/7d6b308ce87beb16d9dba32d741220f6/5bdac81a/a?foo=bar", url) -} - -func aTypeTest(r *url.URL, privateKey string, expires time.Time) string { - //rand equals "0" in test case - rand := "0" - uid := "0" - secret := fmt.Sprintf("%s-%d-%s-%s-%s", r.Path, expires.Unix(), rand, uid, privateKey) - hashValue := md5.Sum([]byte(secret)) - authKey := fmt.Sprintf("%d-%s-%s-%x", expires.Unix(), rand, uid, hashValue) - if r.RawQuery == "" { - return fmt.Sprintf("%s?auth_key=%s", r.String(), authKey) - } - return fmt.Sprintf("%s&auth_key=%s", r.String(), authKey) -} From ae91d1f429a2006ce21b4419a3f15b52592ee4da Mon Sep 17 00:00:00 2001 From: Shawn Chen Date: Mon, 4 Mar 2019 17:17:57 +0800 Subject: [PATCH 207/276] fix ci issue Signed-off-by: Shawn Chen --- vendor.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vendor.conf b/vendor.conf index 12f71672..235f97fb 100644 --- a/vendor.conf +++ b/vendor.conf @@ -7,7 +7,7 @@ github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 github.com/bugsnag/bugsnag-go b1d153021fcd90ca3f080db36bec96dc690fb274 github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702 github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782 -github.com/denverdino/aliyungo 6df11717a253d9c7d4141f9af4deaa7c580cd531 +github.com/denverdino/aliyungo a747050bb1baf06cdd65de7cddc281a2b1c2fde5 github.com/dgrijalva/jwt-go a601269ab70c205d26370c16f7c81e9017c14e04 github.com/docker/go-metrics 399ea8c73916000c64c2c76e8da00ca82f8387ab github.com/docker/libtrust fa567046d9b14f6aa788882a950d69651d230b21 From 3390f32aecd35b9c17eb110c9327f175048a5580 Mon Sep 17 00:00:00 2001 From: Shawn Chen Date: Mon, 4 Mar 2019 17:48:32 +0800 Subject: [PATCH 208/276] fix Context issue Signed-off-by: Shawn Chen --- .../driver/middleware/alicdn/middleware.go | 5 +- .../aliyungo/cdn/auth/random_uuid.go | 97 +++++++++++++++++++ .../denverdino/aliyungo/cdn/auth/sign_url.go | 80 +++++++++++++++ .../denverdino/aliyungo/oss/client.go | 14 ++- .../denverdino/aliyungo/oss/signature.go | 48 ++++++--- .../denverdino/aliyungo/util/util.go | 39 ++++++-- 6 files changed, 259 insertions(+), 24 deletions(-) create mode 100644 vendor/github.com/denverdino/aliyungo/cdn/auth/random_uuid.go create mode 100644 vendor/github.com/denverdino/aliyungo/cdn/auth/sign_url.go diff --git a/registry/storage/driver/middleware/alicdn/middleware.go b/registry/storage/driver/middleware/alicdn/middleware.go index 7c5fc732..2e281712 100755 --- a/registry/storage/driver/middleware/alicdn/middleware.go +++ b/registry/storage/driver/middleware/alicdn/middleware.go @@ -1,12 +1,13 @@ package middleware import ( + "context" "fmt" "net/url" "strings" "time" - "github.com/docker/distribution/context" + dcontext 
"github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" @@ -99,7 +100,7 @@ func newAliCDNStorageMiddleware(storageDriver storagedriver.StorageDriver, optio func (ac *aliCDNStorageMiddleware) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { if ac.StorageDriver.Name() != "oss" { - context.GetLogger(ctx).Warn("the AliCDN middleware does not support this backend storage driver") + dcontext.GetLogger(ctx).Warn("the AliCDN middleware does not support this backend storage driver") return ac.StorageDriver.URLFor(ctx, path, options) } acURL, err := ac.urlSigner.Sign(ac.baseURL+path, time.Now().Add(ac.duration)) diff --git a/vendor/github.com/denverdino/aliyungo/cdn/auth/random_uuid.go b/vendor/github.com/denverdino/aliyungo/cdn/auth/random_uuid.go new file mode 100644 index 00000000..169a2b6c --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/cdn/auth/random_uuid.go @@ -0,0 +1,97 @@ +package auth + +import ( + "crypto/rand" + "fmt" + "io" + "os" + "syscall" + "time" +) + +const ( + // Bits is the number of bits in a UUID + Bits = 128 + + // Size is the number of bytes in a UUID + Size = Bits / 8 + + format = "%08x%04x%04x%04x%012x" +) + +var ( + // Loggerf can be used to override the default logging destination. Such + // log messages in this library should be logged at warning or higher. + Loggerf = func(format string, args ...interface{}) {} +) + +// UUID represents a UUID value. UUIDs can be compared and set to other values +// and accessed by byte. +type UUID [Size]byte + +// GenerateUUID creates a new, version 4 uuid. +func GenerateUUID() (u UUID) { + const ( + // ensures we backoff for less than 450ms total. Use the following to + // select new value, in units of 10ms: + // n*(n+1)/2 = d -> n^2 + n - 2d -> n = (sqrt(8d + 1) - 1)/2 + maxretries = 9 + backoff = time.Millisecond * 10 + ) + + var ( + totalBackoff time.Duration + count int + retries int + ) + + for { + // This should never block but the read may fail. Because of this, + // we just try to read the random number generator until we get + // something. This is a very rare condition but may happen. + b := time.Duration(retries) * backoff + time.Sleep(b) + totalBackoff += b + + n, err := io.ReadFull(rand.Reader, u[count:]) + if err != nil { + if retryOnError(err) && retries < maxretries { + count += n + retries++ + Loggerf("error generating version 4 uuid, retrying: %v", err) + continue + } + + // Any other errors represent a system problem. What did someone + // do to /dev/urandom? + panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err)) + } + + break + } + + u[6] = (u[6] & 0x0f) | 0x40 // set version byte + u[8] = (u[8] & 0x3f) | 0x80 // set high order byte 0b10{8,9,a,b} + + return u +} + +func (u UUID) String() string { + return fmt.Sprintf(format, u[:4], u[4:6], u[6:8], u[8:10], u[10:]) +} + +// retryOnError tries to detect whether or not retrying would be fruitful. +func retryOnError(err error) bool { + switch err := err.(type) { + case *os.PathError: + return retryOnError(err.Err) // unpack the target error + case syscall.Errno: + if err == syscall.EPERM { + // EPERM represents an entropy pool exhaustion, a condition under + // which we backoff and retry. 
+ return true + } + } + + return false +} diff --git a/vendor/github.com/denverdino/aliyungo/cdn/auth/sign_url.go b/vendor/github.com/denverdino/aliyungo/cdn/auth/sign_url.go new file mode 100644 index 00000000..211bd04f --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/cdn/auth/sign_url.go @@ -0,0 +1,80 @@ +package auth + +import ( + "crypto/md5" + "fmt" + "net/url" + "time" +) + +// An URLSigner provides URL signing utilities to sign URLs for Aliyun CDN +// resources. +// authentication document: https://help.aliyun.com/document_detail/85117.html +type URLSigner struct { + authType string + privKey string +} + +// NewURLSigner returns a new signer object. +func NewURLSigner(authType string, privKey string) *URLSigner { + return &URLSigner{ + authType: authType, + privKey: privKey, + } +} + +// Sign returns a signed aliyuncdn url based on authentication type +func (s URLSigner) Sign(uri string, expires time.Time) (string, error) { + r, err := url.Parse(uri) + if err != nil { + return "", fmt.Errorf("unable to parse url: %s", uri) + } + + switch s.authType { + case "a": + return aTypeSign(r, s.privKey, expires), nil + case "b": + return bTypeSign(r, s.privKey, expires), nil + case "c": + return cTypeSign(r, s.privKey, expires), nil + default: + return "", fmt.Errorf("invalid authentication type") + } +} + +// sign by A type authentication method. +// authentication document: https://help.aliyun.com/document_detail/85113.html +func aTypeSign(r *url.URL, privateKey string, expires time.Time) string { + //rand is a random uuid without "-" + rand := GenerateUUID().String() + // not use, "0" by default + uid := "0" + secret := fmt.Sprintf("%s-%d-%s-%s-%s", r.Path, expires.Unix(), rand, uid, privateKey) + hashValue := md5.Sum([]byte(secret)) + authKey := fmt.Sprintf("%d-%s-%s-%x", expires.Unix(), rand, uid, hashValue) + if r.RawQuery == "" { + return fmt.Sprintf("%s?auth_key=%s", r.String(), authKey) + } + return fmt.Sprintf("%s&auth_key=%s", r.String(), authKey) + +} + +// sign by B type authentication method. +// authentication document: https://help.aliyun.com/document_detail/85114.html +func bTypeSign(r *url.URL, privateKey string, expires time.Time) string { + formatExp := expires.Format("200601021504") + secret := privateKey + formatExp + r.Path + hashValue := md5.Sum([]byte(secret)) + signURL := fmt.Sprintf("%s://%s/%s/%x%s?%s", r.Scheme, r.Host, formatExp, hashValue, r.Path, r.RawQuery) + return signURL +} + +// sign by C type authentication method. 
+// authentication document: https://help.aliyun.com/document_detail/85115.html +func cTypeSign(r *url.URL, privateKey string, expires time.Time) string { + hexExp := fmt.Sprintf("%x", expires.Unix()) + secret := privateKey + r.Path + hexExp + hashValue := md5.Sum([]byte(secret)) + signURL := fmt.Sprintf("%s://%s/%x/%s%s?%s", r.Scheme, r.Host, hashValue, hexExp, r.Path, r.RawQuery) + return signURL +} diff --git a/vendor/github.com/denverdino/aliyungo/oss/client.go b/vendor/github.com/denverdino/aliyungo/oss/client.go index 7b5a55b8..c0919194 100644 --- a/vendor/github.com/denverdino/aliyungo/oss/client.go +++ b/vendor/github.com/denverdino/aliyungo/oss/client.go @@ -851,6 +851,17 @@ func (b *Bucket) SignedURLWithArgs(path string, expires time.Time, params url.Va return b.SignedURLWithMethod("GET", path, expires, params, headers) } +func (b *Bucket) SignedURLWithMethodForAssumeRole(method, path string, expires time.Time, params url.Values, headers http.Header) string { + var uv = url.Values{} + if params != nil { + uv = params + } + if len(b.Client.SecurityToken) != 0 { + uv.Set("security-token", b.Client.SecurityToken) + } + return b.SignedURLWithMethod(method, path, expires, params, headers) +} + // SignedURLWithMethod returns a signed URL that allows anyone holding the URL // to either retrieve the object at path or make a HEAD request against it. The signature is valid until expires. func (b *Bucket) SignedURLWithMethod(method, path string, expires time.Time, params url.Values, headers http.Header) string { @@ -1039,7 +1050,8 @@ func partiallyEscapedPath(path string) string { func (client *Client) prepare(req *request) error { // Copy so they can be mutated without affecting on retries. headers := copyHeader(req.headers) - if len(client.SecurityToken) != 0 { + // security-token should be in either Params or Header, cannot be in both + if len(req.params.Get("security-token")) == 0 && len(client.SecurityToken) != 0 { headers.Set("x-oss-security-token", client.SecurityToken) } diff --git a/vendor/github.com/denverdino/aliyungo/oss/signature.go b/vendor/github.com/denverdino/aliyungo/oss/signature.go index 12677175..4a0f4d97 100644 --- a/vendor/github.com/denverdino/aliyungo/oss/signature.go +++ b/vendor/github.com/denverdino/aliyungo/oss/signature.go @@ -13,26 +13,44 @@ const HeaderOSSPrefix = "x-oss-" var ossParamsToSign = map[string]bool{ "acl": true, + "append": true, + "bucketInfo": true, + "cname": true, + "comp": true, + "cors": true, "delete": true, + "endTime": true, + "img": true, + "lifecycle": true, + "live": true, "location": true, "logging": true, - "notification": true, + "objectMeta": true, "partNumber": true, - "policy": true, - "requestPayment": true, - "torrent": true, - "uploadId": true, - "uploads": true, - "versionId": true, - "versioning": true, - "versions": true, - "response-content-type": true, - "response-content-language": true, - "response-expires": true, + "position": true, + "qos": true, + "referer": true, + "replication": true, + "replicationLocation": true, + "replicationProgress": true, "response-cache-control": true, "response-content-disposition": true, "response-content-encoding": true, - "bucketInfo": true, + "response-content-language": true, + "response-content-type": true, + "response-expires": true, + "security-token": true, + "startTime": true, + "status": true, + "style": true, + "styleName": true, + "symlink": true, + "tagging": true, + "uploadId": true, + "uploads": true, + "vod": true, + "website": true, + "x-oss-process": true, } func (client 
*Client) signRequest(request *request) {
@@ -62,7 +80,7 @@ func (client *Client) signRequest(request *request) {
 	}

 	if len(params) > 0 {
-		resource = resource + "?" + util.Encode(params)
+		resource = resource + "?" + util.EncodeWithoutEscape(params)
 	}

 	canonicalizedResource := resource
@@ -74,7 +92,7 @@ func (client *Client) signRequest(request *request) {
 	//log.Println("stringToSign: ", stringToSign)
 	signature := util.CreateSignature(stringToSign, client.AccessKeySecret)

-	if query.Get("OSSAccessKeyId") != "" {
+	if urlSignature {
 		query.Set("Signature", signature)
 	} else {
 		headers.Set("Authorization", "OSS "+client.AccessKeyId+":"+signature)
diff --git a/vendor/github.com/denverdino/aliyungo/util/util.go b/vendor/github.com/denverdino/aliyungo/util/util.go
index ff49f2d8..15a990da 100644
--- a/vendor/github.com/denverdino/aliyungo/util/util.go
+++ b/vendor/github.com/denverdino/aliyungo/util/util.go
@@ -4,13 +4,13 @@ import (
 	"bytes"
 	srand "crypto/rand"
 	"encoding/binary"
+	"encoding/json"
+	"fmt"
 	"math/rand"
 	"net/http"
 	"net/url"
 	"sort"
 	"time"
-	"fmt"
-	"encoding/json"
 )

 const dictionary = "_0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
@@ -66,6 +66,34 @@ func Encode(v url.Values) string {
 	return buf.String()
 }

+// Like Encode, but key and value are not escaped
+func EncodeWithoutEscape(v url.Values) string {
+	if v == nil {
+		return ""
+	}
+	var buf bytes.Buffer
+	keys := make([]string, 0, len(v))
+	for k := range v {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+	for _, k := range keys {
+		vs := v[k]
+		prefix := k
+		for _, v := range vs {
+			if buf.Len() > 0 {
+				buf.WriteByte('&')
+			}
+			buf.WriteString(prefix)
+			if v != "" {
+				buf.WriteString("=")
+				buf.WriteString(v)
+			}
+		}
+	}
+	return buf.String()
+}
+
 func GetGMTime() string {
 	return time.Now().UTC().Format(http.TimeFormat)
 }
@@ -148,11 +176,10 @@ func GenerateRandomECSPassword() string {

 }

-
 func PrettyJson(object interface{}) string {
-	b,err := json.MarshalIndent(object,"", " ")
+	b, err := json.MarshalIndent(object, "", " ")
 	if err != nil {
-		fmt.Printf("ERROR: PrettyJson, %v\n %s\n",err,b)
+		fmt.Printf("ERROR: PrettyJson, %v\n %s\n", err, b)
 	}
 	return string(b)
-}
\ No newline at end of file
+}

From f87772650309d03049310dd7d623449554a10147 Mon Sep 17 00:00:00 2001
From: Eohyung Lee
Date: Wed, 25 Apr 2018 23:31:07 +0900
Subject: [PATCH 209/276] Fix s3 driver for supporting ceph radosgw

Radosgw does not support v2 of the S3 `GET Bucket` API, only v1. The v1
API is backward compatible, so most of this driver works correctly against
it, but `KeyCount` cannot be obtained from v1, as it is only returned by
the v2 API.
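[In isolation, the fallback the diff below introduces amounts to the following sketch, assuming an aws-sdk-go ListObjectsV2 page: v2-capable servers populate KeyCount, while v1-only servers such as radosgw leave it nil.]

```
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func pageKeyCount(objects *s3.ListObjectsV2Output) int64 {
	// v2-capable servers populate KeyCount directly.
	if objects.KeyCount != nil {
		return *objects.KeyCount
	}
	// v1-only servers omit it, so count the page contents manually.
	return int64(len(objects.Contents) + len(objects.CommonPrefixes))
}

func main() {
	page := &s3.ListObjectsV2Output{
		Contents:       []*s3.Object{{Key: aws.String("a")}, {Key: aws.String("b")}},
		CommonPrefixes: []*s3.CommonPrefix{{Prefix: aws.String("dir/")}},
	}
	fmt.Println(pageKeyCount(page)) // 3
}
```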
Signed-off-by: Eohyung Lee --- registry/storage/driver/s3-aws/s3.go | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/registry/storage/driver/s3-aws/s3.go b/registry/storage/driver/s3-aws/s3.go index 126a07f6..1f610f79 100644 --- a/registry/storage/driver/s3-aws/s3.go +++ b/registry/storage/driver/s3-aws/s3.go @@ -970,8 +970,16 @@ func (d *driver) doWalk(parentCtx context.Context, objectCount *int64, path, pre defer done("s3aws.ListObjectsV2Pages(%s)", path) listObjectErr := d.S3.ListObjectsV2PagesWithContext(ctx, listObjectsInput, func(objects *s3.ListObjectsV2Output, lastPage bool) bool { - *objectCount += *objects.KeyCount - walkInfos := make([]walkInfoContainer, 0, *objects.KeyCount) + var count int64 + if objects.KeyCount != nil { + count = *objects.KeyCount + *objectCount += *objects.KeyCount + } else { + count = int64(len(objects.Contents) + len(objects.CommonPrefixes)) + *objectCount += count + } + + walkInfos := make([]walkInfoContainer, 0, count) for _, dir := range objects.CommonPrefixes { commonPrefix := *dir.Prefix From c18c6c33b24010b3bfd3c49539f44c49681b4981 Mon Sep 17 00:00:00 2001 From: Thomas Berger Date: Fri, 15 Mar 2019 21:05:21 +0100 Subject: [PATCH 210/276] S3 Driver: added comment for missing KeyCount workaround Signed-off-by: Thomas Berger --- registry/storage/driver/s3-aws/s3.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/registry/storage/driver/s3-aws/s3.go b/registry/storage/driver/s3-aws/s3.go index 1f610f79..4b6c50d7 100644 --- a/registry/storage/driver/s3-aws/s3.go +++ b/registry/storage/driver/s3-aws/s3.go @@ -971,6 +971,9 @@ func (d *driver) doWalk(parentCtx context.Context, objectCount *int64, path, pre listObjectErr := d.S3.ListObjectsV2PagesWithContext(ctx, listObjectsInput, func(objects *s3.ListObjectsV2Output, lastPage bool) bool { var count int64 + // KeyCount was introduced with version 2 of the GET Bucket operation in S3. + // Some S3 implementations don't support V2 now, so we fall back to manual + // calculation of the key count if required if objects.KeyCount != nil { count = *objects.KeyCount *objectCount += *objects.KeyCount From fd77cf43a6920459c59bb7d5df4e2bdd0e2159f9 Mon Sep 17 00:00:00 2001 From: Shawn Chen Date: Mon, 18 Mar 2019 11:25:35 +0800 Subject: [PATCH 211/276] change package name & format document Signed-off-by: Shawn Chen --- docs/configuration.md | 12 ++++++------ .../storage/driver/middleware/alicdn/middleware.go | 4 ++-- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/configuration.md b/docs/configuration.md index 10245bf1..e1a708d0 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -719,12 +719,12 @@ Then value of ipfilteredby: `alicdn` storage middleware allows the registry to serve layers via a content delivery network provided by Alibaba Cloud. Alicdn requires the OSS storage driver. -| Parameter | Required | Description | -|-----------|----------|-------------------------------------------------------| -| `baseurl` | yes | The `SCHEME://HOST` at which Alicdn is served. | -| `authtype` | yes | The URL authentication type for Alicdn, which should be `a`, `b` or `c`. | -| `privatekey` | yes | The URL authentication key for Alicdn. | -| `duration` | no | An integer and unit for the duration of the Alicdn session. 
Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, or `h`.| +| Parameter | Required | Description | +|--------------|----------|-------------------------------------------------------------------------| +| `baseurl` | yes | The `SCHEME://HOST` at which Alicdn is served. | +| `authtype` | yes | The URL authentication type for Alicdn, which should be `a`, `b` or `c`. See the [Authentication configuration](https://www.alibabacloud.com/help/doc-detail/85117.htm).| +| `privatekey` | yes | The URL authentication key for Alicdn. | +| `duration` | no | An integer and unit for the duration of the Alicdn session. Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, or `h`.| ### `redirect` diff --git a/registry/storage/driver/middleware/alicdn/middleware.go b/registry/storage/driver/middleware/alicdn/middleware.go index 2e281712..51993161 100755 --- a/registry/storage/driver/middleware/alicdn/middleware.go +++ b/registry/storage/driver/middleware/alicdn/middleware.go @@ -1,4 +1,4 @@ -package middleware +package alicdn import ( "context" @@ -27,7 +27,7 @@ type aliCDNStorageMiddleware struct { var _ storagedriver.StorageDriver = &aliCDNStorageMiddleware{} // newAliCDNStorageMiddleware constructs and returns a new AliCDN -// layerHandler implementation. +// StorageDriver implementation. // Required options: baseurl, authtype, privatekey // Optional options: duration func newAliCDNStorageMiddleware(storageDriver storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) { From 51bb5cee5b6ad10b0c7856164e7fce0823eb8374 Mon Sep 17 00:00:00 2001 From: Shawn Chen Date: Tue, 19 Mar 2019 15:06:02 +0800 Subject: [PATCH 212/276] import alicdn package Signed-off-by: Shawn Chen --- cmd/registry/main.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/registry/main.go b/cmd/registry/main.go index 5beaa563..06a3cf19 100644 --- a/cmd/registry/main.go +++ b/cmd/registry/main.go @@ -12,6 +12,7 @@ import ( _ "github.com/docker/distribution/registry/storage/driver/filesystem" _ "github.com/docker/distribution/registry/storage/driver/gcs" _ "github.com/docker/distribution/registry/storage/driver/inmemory" + _ "github.com/docker/distribution/registry/storage/driver/middleware/alicdn" _ "github.com/docker/distribution/registry/storage/driver/middleware/cloudfront" _ "github.com/docker/distribution/registry/storage/driver/middleware/redirect" _ "github.com/docker/distribution/registry/storage/driver/oss" From 74f429a5ad400894cd0a9cf96a6204696887b37a Mon Sep 17 00:00:00 2001 From: Jesse Brown Date: Fri, 5 Apr 2019 14:20:20 -0500 Subject: [PATCH 213/276] Fix typo: offest -> offset Signed-off-by: Jesse Brown --- registry/handlers/blobupload.go | 2 +- registry/storage/driver/testsuites/testsuites.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/registry/handlers/blobupload.go b/registry/handlers/blobupload.go index b6bfe302..3dc5b75f 100644 --- a/registry/handlers/blobupload.go +++ b/registry/handlers/blobupload.go @@ -77,7 +77,7 @@ func blobUploadDispatcher(ctx *Context, r *http.Request) http.Handler { if size := upload.Size(); size != buh.State.Offset { defer upload.Close() - dcontext.GetLogger(ctx).Errorf("upload resumed at wrong offest: %d != %d", size, buh.State.Offset) + dcontext.GetLogger(ctx).Errorf("upload resumed at wrong offset: %d != %d", size, buh.State.Offset) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) upload.Cancel(buh) diff --git 
a/registry/storage/driver/testsuites/testsuites.go b/registry/storage/driver/testsuites/testsuites.go
index 5e37c5f3..7cf7b379 100644
--- a/registry/storage/driver/testsuites/testsuites.go
+++ b/registry/storage/driver/testsuites/testsuites.go
@@ -345,7 +345,7 @@ func (suite *DriverSuite) TestReaderWithOffset(c *check.C) {
 	c.Assert(err, check.IsNil)
 	c.Assert(readContents, check.DeepEquals, contentsChunk3)

-	// Ensure we get invalid offest for negative offsets.
+	// Ensure we get invalid offset for negative offsets.
 	reader, err = suite.StorageDriver.Reader(suite.ctx, filename, -1)
 	c.Assert(err, check.FitsTypeOf, storagedriver.InvalidOffsetError{})
 	c.Assert(err.(storagedriver.InvalidOffsetError).Offset, check.Equals, int64(-1))

From 5afbf324000f8c9675ef02b9a7f88948f0c82a0e Mon Sep 17 00:00:00 2001
From: Sevki Hasirci
Date: Sun, 14 Apr 2019 11:05:59 +0100
Subject: [PATCH 214/276] fix no error returned in fetchTokenWithOAuth

fetchTokenWithBasicAuth checks if a token is in the token response, but
fetchTokenWithOAuth does not. These changes implement the same behaviour
for the latter, returning an `ErrNoToken` if a token is not found in the
response.

Signed-off-by: Sevki Hasirci
---
 registry/client/auth/session.go | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/registry/client/auth/session.go b/registry/client/auth/session.go
index aad8a0e6..5d2322f3 100644
--- a/registry/client/auth/session.go
+++ b/registry/client/auth/session.go
@@ -366,6 +366,10 @@ func (th *tokenHandler) fetchTokenWithOAuth(realm *url.URL, refreshToken, servic
 		return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err)
 	}

+	if tr.AccessToken == "" {
+		return "", time.Time{}, ErrNoToken
+	}
+
 	if tr.RefreshToken != "" && tr.RefreshToken != refreshToken {
 		th.creds.SetRefreshToken(realm, service, tr.RefreshToken)
 	}

From 0e2d080a8a86f64e2260ec8a1047b3a1a4916872 Mon Sep 17 00:00:00 2001
From: Damien Mathieu
Date: Thu, 9 May 2019 14:07:58 +0200
Subject: [PATCH 215/276] append the written bytes to the blob writer's size

Any byte written should append to the size. Otherwise, the full Size is
always zero.
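[The bookkeeping this patch fixes can be shown in isolation: each acknowledged chunk comes back with a `Range: start-end` header, and the client must advance its local offset by the acknowledged byte count, or `Size()` stays at zero. A simplified standalone sketch; the real client performs this parsing inside `ReadFrom` and `Write`:]

```
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// advance moves the upload offset forward by the byte range the server
// acknowledged, e.g. "0-63" acknowledges bytes 0 through 63 inclusive.
func advance(offset int64, rng string) (int64, error) {
	parts := strings.SplitN(rng, "-", 2)
	if len(parts) != 2 {
		return offset, fmt.Errorf("bad range format: %s", rng)
	}
	start, err := strconv.ParseInt(parts[0], 10, 64)
	if err != nil {
		return offset, err
	}
	end, err := strconv.ParseInt(parts[1], 10, 64)
	if err != nil {
		return offset, err
	}
	return offset + (end - start + 1), nil
}

func main() {
	off := int64(0)
	off, _ = advance(off, "0-63")  // first 64-byte chunk acknowledged
	off, _ = advance(off, "64-99") // next 36 bytes
	fmt.Println(off)               // 100
}
```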
Signed-off-by: Damien Mathieu --- registry/client/blob_writer.go | 4 +- registry/client/blob_writer_test.go | 88 +++++++++++++++++++++++++++++ 2 files changed, 90 insertions(+), 2 deletions(-) diff --git a/registry/client/blob_writer.go b/registry/client/blob_writer.go index 695bf852..cc6e88ca 100644 --- a/registry/client/blob_writer.go +++ b/registry/client/blob_writer.go @@ -64,8 +64,8 @@ func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) { return 0, fmt.Errorf("bad range format: %s", rng) } + hbu.offset += end - start + 1 return (end - start + 1), nil - } func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) { @@ -99,8 +99,8 @@ func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) { return 0, fmt.Errorf("bad range format: %s", rng) } + hbu.offset += int64(end - start + 1) return (end - start + 1), nil - } func (hbu *httpBlobUpload) Size() int64 { diff --git a/registry/client/blob_writer_test.go b/registry/client/blob_writer_test.go index 099dca4f..21b3b3d2 100644 --- a/registry/client/blob_writer_test.go +++ b/registry/client/blob_writer_test.go @@ -209,3 +209,91 @@ func TestUploadReadFrom(t *testing.T) { t.Fatalf("Unexpected response status: %s, expected %s", uploadErr.Status, expected) } } + +func TestUploadSize(t *testing.T) { + _, b := newRandomBlob(64) + readFromLocationPath := "/v2/test/upload/readfrom/uploads/testid" + writeLocationPath := "/v2/test/upload/readfrom/uploads/testid" + + m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ + { + Request: testutil.Request{ + Method: "GET", + Route: "/v2/", + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Headers: http.Header(map[string][]string{ + "Docker-Distribution-API-Version": {"registry/2.0"}, + }), + }, + }, + { + Request: testutil.Request{ + Method: "PATCH", + Route: readFromLocationPath, + Body: b, + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Docker-Upload-UUID": {"46603072-7a1b-4b41-98f9-fd8a7da89f9b"}, + "Location": {readFromLocationPath}, + "Range": {"0-63"}, + }), + }, + }, + { + Request: testutil.Request{ + Method: "PATCH", + Route: writeLocationPath, + Body: b, + }, + Response: testutil.Response{ + StatusCode: http.StatusAccepted, + Headers: http.Header(map[string][]string{ + "Docker-Upload-UUID": {"46603072-7a1b-4b41-98f9-fd8a7da89f9b"}, + "Location": {writeLocationPath}, + "Range": {"0-63"}, + }), + }, + }, + }) + + e, c := testServer(m) + defer c() + + // Writing with ReadFrom + blobUpload := &httpBlobUpload{ + client: &http.Client{}, + location: e + readFromLocationPath, + } + + if blobUpload.Size() != 0 { + t.Fatalf("Wrong size returned from Size: %d, expected 0", blobUpload.Size()) + } + + _, err := blobUpload.ReadFrom(bytes.NewReader(b)) + if err != nil { + t.Fatalf("Error calling ReadFrom: %s", err) + } + + if blobUpload.Size() != 64 { + t.Fatalf("Wrong size returned from Size: %d, expected 64", blobUpload.Size()) + } + + // Writing with Write + blobUpload = &httpBlobUpload{ + client: &http.Client{}, + location: e + writeLocationPath, + } + + _, err = blobUpload.Write(b) + if err != nil { + t.Fatalf("Error calling Write: %s", err) + } + + if blobUpload.Size() != 64 { + t.Fatalf("Wrong size returned from Size: %d, expected 64", blobUpload.Size()) + } +} From 8f9c8094fbe639e6b4e56e5c574932e629b145ef Mon Sep 17 00:00:00 2001 From: Tariq Ibrahim Date: Wed, 15 May 2019 17:21:50 -0700 Subject: [PATCH 216/276] replace rsc.io/letsencrypt in favour of golang.org/x/crypto 
Signed-off-by: Tariq Ibrahim --- docs/configuration.md | 4 +- registry/registry.go | 32 +- vendor.conf | 7 +- vendor/github.com/aws/aws-sdk-go/go.mod | 6 + vendor/github.com/miekg/dns/LICENSE | 32 - vendor/github.com/miekg/dns/README.md | 154 - vendor/github.com/miekg/dns/client.go | 455 - vendor/github.com/miekg/dns/clientconfig.go | 99 - vendor/github.com/miekg/dns/dane.go | 44 - vendor/github.com/miekg/dns/defaults.go | 282 - vendor/github.com/miekg/dns/dns.go | 104 - vendor/github.com/miekg/dns/dnssec.go | 721 -- vendor/github.com/miekg/dns/dnssec_keygen.go | 156 - vendor/github.com/miekg/dns/dnssec_keyscan.go | 249 - vendor/github.com/miekg/dns/dnssec_privkey.go | 85 - vendor/github.com/miekg/dns/doc.go | 251 - vendor/github.com/miekg/dns/edns.go | 597 -- vendor/github.com/miekg/dns/format.go | 87 - vendor/github.com/miekg/dns/generate.go | 159 - vendor/github.com/miekg/dns/labels.go | 168 - vendor/github.com/miekg/dns/msg.go | 1231 --- vendor/github.com/miekg/dns/msg_helpers.go | 630 -- vendor/github.com/miekg/dns/nsecx.go | 119 - vendor/github.com/miekg/dns/privaterr.go | 149 - vendor/github.com/miekg/dns/rawmsg.go | 49 - vendor/github.com/miekg/dns/reverse.go | 38 - vendor/github.com/miekg/dns/sanitize.go | 84 - vendor/github.com/miekg/dns/scan.go | 981 -- vendor/github.com/miekg/dns/scan_rr.go | 2179 ---- vendor/github.com/miekg/dns/scanner.go | 43 - vendor/github.com/miekg/dns/server.go | 734 -- vendor/github.com/miekg/dns/sig0.go | 219 - vendor/github.com/miekg/dns/singleinflight.go | 57 - vendor/github.com/miekg/dns/smimea.go | 47 - vendor/github.com/miekg/dns/tlsa.go | 47 - vendor/github.com/miekg/dns/tsig.go | 384 - vendor/github.com/miekg/dns/types.go | 1294 --- vendor/github.com/miekg/dns/udp.go | 58 - vendor/github.com/miekg/dns/udp_linux.go | 73 - vendor/github.com/miekg/dns/udp_other.go | 17 - vendor/github.com/miekg/dns/udp_plan9.go | 34 - vendor/github.com/miekg/dns/udp_windows.go | 34 - vendor/github.com/miekg/dns/update.go | 106 - vendor/github.com/miekg/dns/xfr.go | 244 - vendor/github.com/miekg/dns/zmsg.go | 3529 ------- vendor/github.com/miekg/dns/ztypes.go | 842 -- vendor/github.com/xenolf/lego/LICENSE | 21 - vendor/github.com/xenolf/lego/README.md | 248 - .../github.com/xenolf/lego/acme/challenges.go | 16 - vendor/github.com/xenolf/lego/acme/client.go | 702 -- vendor/github.com/xenolf/lego/acme/crypto.go | 323 - .../xenolf/lego/acme/dns_challenge.go | 279 - .../xenolf/lego/acme/dns_challenge_manual.go | 53 - vendor/github.com/xenolf/lego/acme/error.go | 86 - vendor/github.com/xenolf/lego/acme/http.go | 120 - .../xenolf/lego/acme/http_challenge.go | 41 - .../xenolf/lego/acme/http_challenge_server.go | 79 - vendor/github.com/xenolf/lego/acme/jws.go | 109 - .../github.com/xenolf/lego/acme/messages.go | 116 - .../xenolf/lego/acme/pop_challenge.go | 1 - .../github.com/xenolf/lego/acme/provider.go | 28 - .../xenolf/lego/acme/tls_sni_challenge.go | 67 - .../lego/acme/tls_sni_challenge_server.go | 62 - vendor/github.com/xenolf/lego/acme/utils.go | 29 - vendor/golang.org/x/crypto/README | 3 - vendor/golang.org/x/crypto/README.md | 21 + vendor/golang.org/x/crypto/acme/acme.go | 922 ++ .../x/crypto/acme/autocert/autocert.go | 1139 +++ .../x/crypto/acme/autocert/cache.go | 130 + .../x/crypto/acme/autocert/listener.go | 157 + .../x/crypto/acme/autocert/renewal.go | 141 + vendor/golang.org/x/crypto/acme/http.go | 281 + vendor/golang.org/x/crypto/acme/jws.go | 153 + vendor/golang.org/x/crypto/acme/types.go | 329 + vendor/golang.org/x/crypto/bcrypt/bcrypt.go | 11 +- 
vendor/golang.org/x/crypto/blowfish/cipher.go | 4 +- vendor/golang.org/x/crypto/blowfish/const.go | 2 +- .../x/crypto/curve25519/const_amd64.h | 8 + .../x/crypto/curve25519/const_amd64.s | 20 + .../x/crypto/curve25519/cswap_amd64.s | 65 + .../x/crypto/curve25519/curve25519.go | 834 ++ vendor/golang.org/x/crypto/curve25519/doc.go | 23 + .../x/crypto/curve25519/freeze_amd64.s | 73 + .../x/crypto/curve25519/ladderstep_amd64.s | 1377 +++ .../x/crypto/curve25519/mont25519_amd64.go | 240 + .../x/crypto/curve25519/mul_amd64.s | 169 + .../x/crypto/curve25519/square_amd64.s | 132 + vendor/golang.org/x/crypto/ocsp/ocsp.go | 592 -- .../x/crypto/otr/libotr_test_helper.c | 166 +- vendor/golang.org/x/crypto/otr/otr.go | 39 +- vendor/golang.org/x/crypto/ssh/test/doc.go | 7 + .../x/crypto/ssh/test/sshd_test_pw.c | 173 + vendor/golang.org/x/net/publicsuffix/list.go | 133 - vendor/golang.org/x/net/publicsuffix/table.go | 8786 ----------------- vendor/golang.org/x/time/LICENSE | 27 - vendor/golang.org/x/time/PATENTS | 22 - vendor/golang.org/x/time/README | 1 - vendor/golang.org/x/time/rate/rate.go | 368 - vendor/gopkg.in/square/go-jose.v1/LICENSE | 202 - vendor/gopkg.in/square/go-jose.v1/README.md | 209 - .../gopkg.in/square/go-jose.v1/asymmetric.go | 498 - .../square/go-jose.v1/cipher/cbc_hmac.go | 196 - .../square/go-jose.v1/cipher/concat_kdf.go | 75 - .../square/go-jose.v1/cipher/ecdh_es.go | 51 - .../square/go-jose.v1/cipher/key_wrap.go | 109 - vendor/gopkg.in/square/go-jose.v1/crypter.go | 349 - vendor/gopkg.in/square/go-jose.v1/doc.go | 26 - vendor/gopkg.in/square/go-jose.v1/encoding.go | 191 - .../gopkg.in/square/go-jose.v1/json/LICENSE | 27 - .../gopkg.in/square/go-jose.v1/json/README.md | 13 - .../gopkg.in/square/go-jose.v1/json/decode.go | 1183 --- .../gopkg.in/square/go-jose.v1/json/encode.go | 1197 --- .../gopkg.in/square/go-jose.v1/json/indent.go | 141 - .../square/go-jose.v1/json/scanner.go | 623 -- .../gopkg.in/square/go-jose.v1/json/stream.go | 480 - .../gopkg.in/square/go-jose.v1/json/tags.go | 44 - .../gopkg.in/square/go-jose.v1/json_fork.go | 31 - vendor/gopkg.in/square/go-jose.v1/json_std.go | 31 - vendor/gopkg.in/square/go-jose.v1/jwe.go | 278 - vendor/gopkg.in/square/go-jose.v1/jwk.go | 380 - vendor/gopkg.in/square/go-jose.v1/jws.go | 252 - vendor/gopkg.in/square/go-jose.v1/shared.go | 224 - vendor/gopkg.in/square/go-jose.v1/signing.go | 218 - .../gopkg.in/square/go-jose.v1/symmetric.go | 349 - vendor/gopkg.in/square/go-jose.v1/utils.go | 74 - vendor/gopkg.in/yaml.v2/go.mod | 5 + vendor/rsc.io/letsencrypt/LICENSE | 27 - vendor/rsc.io/letsencrypt/README | 177 - vendor/rsc.io/letsencrypt/lets.go | 781 -- 129 files changed, 6555 insertions(+), 37728 deletions(-) create mode 100644 vendor/github.com/aws/aws-sdk-go/go.mod delete mode 100644 vendor/github.com/miekg/dns/LICENSE delete mode 100644 vendor/github.com/miekg/dns/README.md delete mode 100644 vendor/github.com/miekg/dns/client.go delete mode 100644 vendor/github.com/miekg/dns/clientconfig.go delete mode 100644 vendor/github.com/miekg/dns/dane.go delete mode 100644 vendor/github.com/miekg/dns/defaults.go delete mode 100644 vendor/github.com/miekg/dns/dns.go delete mode 100644 vendor/github.com/miekg/dns/dnssec.go delete mode 100644 vendor/github.com/miekg/dns/dnssec_keygen.go delete mode 100644 vendor/github.com/miekg/dns/dnssec_keyscan.go delete mode 100644 vendor/github.com/miekg/dns/dnssec_privkey.go delete mode 100644 vendor/github.com/miekg/dns/doc.go delete mode 100644 vendor/github.com/miekg/dns/edns.go delete mode 100644 
vendor/github.com/miekg/dns/format.go delete mode 100644 vendor/github.com/miekg/dns/generate.go delete mode 100644 vendor/github.com/miekg/dns/labels.go delete mode 100644 vendor/github.com/miekg/dns/msg.go delete mode 100644 vendor/github.com/miekg/dns/msg_helpers.go delete mode 100644 vendor/github.com/miekg/dns/nsecx.go delete mode 100644 vendor/github.com/miekg/dns/privaterr.go delete mode 100644 vendor/github.com/miekg/dns/rawmsg.go delete mode 100644 vendor/github.com/miekg/dns/reverse.go delete mode 100644 vendor/github.com/miekg/dns/sanitize.go delete mode 100644 vendor/github.com/miekg/dns/scan.go delete mode 100644 vendor/github.com/miekg/dns/scan_rr.go delete mode 100644 vendor/github.com/miekg/dns/scanner.go delete mode 100644 vendor/github.com/miekg/dns/server.go delete mode 100644 vendor/github.com/miekg/dns/sig0.go delete mode 100644 vendor/github.com/miekg/dns/singleinflight.go delete mode 100644 vendor/github.com/miekg/dns/smimea.go delete mode 100644 vendor/github.com/miekg/dns/tlsa.go delete mode 100644 vendor/github.com/miekg/dns/tsig.go delete mode 100644 vendor/github.com/miekg/dns/types.go delete mode 100644 vendor/github.com/miekg/dns/udp.go delete mode 100644 vendor/github.com/miekg/dns/udp_linux.go delete mode 100644 vendor/github.com/miekg/dns/udp_other.go delete mode 100644 vendor/github.com/miekg/dns/udp_plan9.go delete mode 100644 vendor/github.com/miekg/dns/udp_windows.go delete mode 100644 vendor/github.com/miekg/dns/update.go delete mode 100644 vendor/github.com/miekg/dns/xfr.go delete mode 100644 vendor/github.com/miekg/dns/zmsg.go delete mode 100644 vendor/github.com/miekg/dns/ztypes.go delete mode 100644 vendor/github.com/xenolf/lego/LICENSE delete mode 100644 vendor/github.com/xenolf/lego/README.md delete mode 100644 vendor/github.com/xenolf/lego/acme/challenges.go delete mode 100644 vendor/github.com/xenolf/lego/acme/client.go delete mode 100644 vendor/github.com/xenolf/lego/acme/crypto.go delete mode 100644 vendor/github.com/xenolf/lego/acme/dns_challenge.go delete mode 100644 vendor/github.com/xenolf/lego/acme/dns_challenge_manual.go delete mode 100644 vendor/github.com/xenolf/lego/acme/error.go delete mode 100644 vendor/github.com/xenolf/lego/acme/http.go delete mode 100644 vendor/github.com/xenolf/lego/acme/http_challenge.go delete mode 100644 vendor/github.com/xenolf/lego/acme/http_challenge_server.go delete mode 100644 vendor/github.com/xenolf/lego/acme/jws.go delete mode 100644 vendor/github.com/xenolf/lego/acme/messages.go delete mode 100644 vendor/github.com/xenolf/lego/acme/pop_challenge.go delete mode 100644 vendor/github.com/xenolf/lego/acme/provider.go delete mode 100644 vendor/github.com/xenolf/lego/acme/tls_sni_challenge.go delete mode 100644 vendor/github.com/xenolf/lego/acme/tls_sni_challenge_server.go delete mode 100644 vendor/github.com/xenolf/lego/acme/utils.go delete mode 100644 vendor/golang.org/x/crypto/README create mode 100644 vendor/golang.org/x/crypto/README.md create mode 100644 vendor/golang.org/x/crypto/acme/acme.go create mode 100644 vendor/golang.org/x/crypto/acme/autocert/autocert.go create mode 100644 vendor/golang.org/x/crypto/acme/autocert/cache.go create mode 100644 vendor/golang.org/x/crypto/acme/autocert/listener.go create mode 100644 vendor/golang.org/x/crypto/acme/autocert/renewal.go create mode 100644 vendor/golang.org/x/crypto/acme/http.go create mode 100644 vendor/golang.org/x/crypto/acme/jws.go create mode 100644 vendor/golang.org/x/crypto/acme/types.go create mode 100644 
vendor/golang.org/x/crypto/curve25519/const_amd64.h create mode 100644 vendor/golang.org/x/crypto/curve25519/const_amd64.s create mode 100644 vendor/golang.org/x/crypto/curve25519/cswap_amd64.s create mode 100644 vendor/golang.org/x/crypto/curve25519/curve25519.go create mode 100644 vendor/golang.org/x/crypto/curve25519/doc.go create mode 100644 vendor/golang.org/x/crypto/curve25519/freeze_amd64.s create mode 100644 vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s create mode 100644 vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go create mode 100644 vendor/golang.org/x/crypto/curve25519/mul_amd64.s create mode 100644 vendor/golang.org/x/crypto/curve25519/square_amd64.s delete mode 100644 vendor/golang.org/x/crypto/ocsp/ocsp.go create mode 100644 vendor/golang.org/x/crypto/ssh/test/doc.go create mode 100644 vendor/golang.org/x/crypto/ssh/test/sshd_test_pw.c delete mode 100644 vendor/golang.org/x/net/publicsuffix/list.go delete mode 100644 vendor/golang.org/x/net/publicsuffix/table.go delete mode 100644 vendor/golang.org/x/time/LICENSE delete mode 100644 vendor/golang.org/x/time/PATENTS delete mode 100644 vendor/golang.org/x/time/README delete mode 100644 vendor/golang.org/x/time/rate/rate.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/LICENSE delete mode 100644 vendor/gopkg.in/square/go-jose.v1/README.md delete mode 100644 vendor/gopkg.in/square/go-jose.v1/asymmetric.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/crypter.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/doc.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/encoding.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/json/LICENSE delete mode 100644 vendor/gopkg.in/square/go-jose.v1/json/README.md delete mode 100644 vendor/gopkg.in/square/go-jose.v1/json/decode.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/json/encode.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/json/indent.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/json/scanner.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/json/stream.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/json/tags.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/json_fork.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/json_std.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/jwe.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/jwk.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/jws.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/shared.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/signing.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/symmetric.go delete mode 100644 vendor/gopkg.in/square/go-jose.v1/utils.go create mode 100644 vendor/gopkg.in/yaml.v2/go.mod delete mode 100644 vendor/rsc.io/letsencrypt/LICENSE delete mode 100644 vendor/rsc.io/letsencrypt/README delete mode 100644 vendor/rsc.io/letsencrypt/lets.go diff --git a/docs/configuration.md b/docs/configuration.md index c6fc18e9..8bbc67cb 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -848,7 +848,9 @@ TLS certificates provided by > to the `docker run` command or using a similar setting in a cloud > configuration. 
You should also set the `hosts` option to the list of hostnames > that are valid for this registry to avoid trying to get certificates for random -> hostnames due to malicious clients connecting with bogus SNI hostnames. +> hostnames due to malicious clients connecting with bogus SNI hostnames. Please +> ensure that you have the `ca-certificates` package installed in order to verify +> letsencrypt certificates. | Parameter | Required | Description | |-----------|----------|-------------------------------------------------------| diff --git a/registry/registry.go b/registry/registry.go index 03ff3fd7..e4fe36cb 100644 --- a/registry/registry.go +++ b/registry/registry.go @@ -12,11 +12,17 @@ import ( "syscall" "time" - "rsc.io/letsencrypt" - "github.com/Shopify/logrus-bugsnag" logstash "github.com/bshuster-repo/logrus-logstash-hook" "github.com/bugsnag/bugsnag-go" + "github.com/docker/go-metrics" + gorhandlers "github.com/gorilla/handlers" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "github.com/yvasiyarov/gorelic" + "golang.org/x/crypto/acme" + "golang.org/x/crypto/acme/autocert" + "github.com/docker/distribution/configuration" dcontext "github.com/docker/distribution/context" "github.com/docker/distribution/health" @@ -24,11 +30,6 @@ import ( "github.com/docker/distribution/registry/listener" "github.com/docker/distribution/uuid" "github.com/docker/distribution/version" - "github.com/docker/go-metrics" - gorhandlers "github.com/gorilla/handlers" - log "github.com/sirupsen/logrus" - "github.com/spf13/cobra" - "github.com/yvasiyarov/gorelic" ) // this channel gets notified when process receives signal. It is global to ease unit testing @@ -170,19 +171,14 @@ func (registry *Registry) ListenAndServe() error { if config.HTTP.TLS.Certificate != "" { return fmt.Errorf("cannot specify both certificate and Let's Encrypt") } - var m letsencrypt.Manager - if err := m.CacheFile(config.HTTP.TLS.LetsEncrypt.CacheFile); err != nil { - return err - } - if !m.Registered() { - if err := m.Register(config.HTTP.TLS.LetsEncrypt.Email, nil); err != nil { - return err - } - } - if len(config.HTTP.TLS.LetsEncrypt.Hosts) > 0 { - m.SetHosts(config.HTTP.TLS.LetsEncrypt.Hosts) + m := &autocert.Manager{ + HostPolicy: autocert.HostWhitelist(config.HTTP.TLS.LetsEncrypt.Hosts...), + Cache: autocert.DirCache(config.HTTP.TLS.LetsEncrypt.CacheFile), + Email: config.HTTP.TLS.LetsEncrypt.Email, + Prompt: autocert.AcceptTOS, } tlsConf.GetCertificate = m.GetCertificate + tlsConf.NextProtos = append(tlsConf.NextProtos, acme.ALPNProto) } else { tlsConf.Certificates = make([]tls.Certificate, 1) tlsConf.Certificates[0], err = tls.LoadX509KeyPair(config.HTTP.TLS.Certificate, config.HTTP.TLS.Key) diff --git a/vendor.conf b/vendor.conf index 235f97fb..35bda492 100644 --- a/vendor.conf +++ b/vendor.conf @@ -21,7 +21,6 @@ github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d github.com/marstr/guid 8bd9a64bf37eb297b492a4101fb28e80ac0b290f github.com/satori/go.uuid f58768cc1a7a7e77a3bd49e98cdd21419399b6a3 github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c -github.com/miekg/dns 271c58e0c14f552178ea321a545ff9af38930f39 github.com/mitchellh/mapstructure 482a9fd5fa83e8c4e7817413b80f3eb8feec03ef github.com/ncw/swift a0320860b16212c2b59b4912bb6508cda1d7cee6 github.com/prometheus/client_golang c332b6f63c0658a65eca15c0e5247ded801cf564 @@ -31,21 +30,17 @@ github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd github.com/Shopify/logrus-bugsnag 
577dee27f20dd8f1a529f82210094af593be12bd github.com/spf13/cobra 312092086bed4968099259622145a0c9ae280064 github.com/spf13/pflag 5644820622454e71517561946e3d94b9f9db6842 -github.com/xenolf/lego a9d8cec0e6563575e5868a005359ac97911b5985 github.com/yvasiyarov/go-metrics 57bccd1ccd43f94bb17fdd8bf3007059b802f85e github.com/yvasiyarov/gorelic a9bba5b9ab508a086f9a12b8c51fab68478e2128 github.com/yvasiyarov/newrelic_platform_go b21fdbd4370f3717f3bbd2bf41c223bc273068e6 -golang.org/x/crypto c10c31b5e94b6f7a0283272dc2bb27163dcea24b +golang.org/x/crypto e84da0312774c21d64ee2317962ef669b27ffb41 golang.org/x/net 4876518f9e71663000c348837735820161a42df7 golang.org/x/oauth2 045497edb6234273d67dbc25da3f2ddbc4c4cacf -golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb google.golang.org/api 9bf6e6e569ff057f75d9604a46c52928f17d2b54 google.golang.org/appengine 12d5545dc1cfa6047a286d5e853841b6471f4c19 google.golang.org/cloud 975617b05ea8a58727e6c1a06b6161ff4185a9f2 google.golang.org/grpc d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994 gopkg.in/check.v1 64131543e7896d5bcc6bd5a76287eb75ea96c673 -gopkg.in/square/go-jose.v1 40d457b439244b546f023d056628e5184136899b gopkg.in/yaml.v2 v2.2.1 -rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb github.com/opencontainers/image-spec ab7389ef9f50030c9b245bc16b981c7ddf192882 diff --git a/vendor/github.com/aws/aws-sdk-go/go.mod b/vendor/github.com/aws/aws-sdk-go/go.mod new file mode 100644 index 00000000..875a778e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/go.mod @@ -0,0 +1,6 @@ +module github.com/aws/aws-sdk-go + +require ( + github.com/go-ini/ini v1.25.4 + github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8 +) diff --git a/vendor/github.com/miekg/dns/LICENSE b/vendor/github.com/miekg/dns/LICENSE deleted file mode 100644 index 5763fa7f..00000000 --- a/vendor/github.com/miekg/dns/LICENSE +++ /dev/null @@ -1,32 +0,0 @@ -Extensions of the original work are copyright (c) 2011 Miek Gieben - -As this is fork of the official Go code the same license applies: - -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md deleted file mode 100644 index 0e3356cb..00000000 --- a/vendor/github.com/miekg/dns/README.md +++ /dev/null @@ -1,154 +0,0 @@ -[![Build Status](https://travis-ci.org/miekg/dns.svg?branch=master)](https://travis-ci.org/miekg/dns) [![](https://godoc.org/github.com/miekg/dns?status.svg)](https://godoc.org/github.com/miekg/dns) - -# Alternative (more granular) approach to a DNS library - -> Less is more. - -Complete and usable DNS library. All widely used Resource Records are -supported, including the DNSSEC types. It follows a lean and mean philosophy. -If there is stuff you should know as a DNS programmer there isn't a convenience -function for it. Server side and client side programming is supported, i.e. you -can build servers and resolvers with it. - -We try to keep the "master" branch as sane as possible and at the bleeding edge -of standards, avoiding breaking changes wherever reasonable. We support the last -two versions of Go, currently: 1.5 and 1.6. - -# Goals - -* KISS; -* Fast; -* Small API, if its easy to code in Go, don't make a function for it. - -# Users - -A not-so-up-to-date-list-that-may-be-actually-current: - -* https://cloudflare.com -* https://github.com/abh/geodns -* http://www.statdns.com/ -* http://www.dnsinspect.com/ -* https://github.com/chuangbo/jianbing-dictionary-dns -* http://www.dns-lg.com/ -* https://github.com/fcambus/rrda -* https://github.com/kenshinx/godns -* https://github.com/skynetservices/skydns -* https://github.com/hashicorp/consul -* https://github.com/DevelopersPL/godnsagent -* https://github.com/duedil-ltd/discodns -* https://github.com/StalkR/dns-reverse-proxy -* https://github.com/tianon/rawdns -* https://mesosphere.github.io/mesos-dns/ -* https://pulse.turbobytes.com/ -* https://play.google.com/store/apps/details?id=com.turbobytes.dig -* https://github.com/fcambus/statzone -* https://github.com/benschw/dns-clb-go -* https://github.com/corny/dnscheck for http://public-dns.info/ -* https://namesmith.io -* https://github.com/miekg/unbound -* https://github.com/miekg/exdns -* https://dnslookup.org -* https://github.com/looterz/grimd -* https://github.com/phamhongviet/serf-dns -* https://github.com/mehrdadrad/mylg -* https://github.com/bamarni/dockness -* https://github.com/fffaraz/microdns - -Send pull request if you want to be listed here. 
- -# Features - -* UDP/TCP queries, IPv4 and IPv6; -* RFC 1035 zone file parsing ($INCLUDE, $ORIGIN, $TTL and $GENERATE (for all record types) are supported; -* Fast: - * Reply speed around ~ 80K qps (faster hardware results in more qps); - * Parsing RRs ~ 100K RR/s, that's 5M records in about 50 seconds; -* Server side programming (mimicking the net/http package); -* Client side programming; -* DNSSEC: signing, validating and key generation for DSA, RSA and ECDSA; -* EDNS0, NSID, Cookies; -* AXFR/IXFR; -* TSIG, SIG(0); -* DNS over TLS: optional encrypted connection between client and server; -* DNS name compression; -* Depends only on the standard library. - -Have fun! - -Miek Gieben - 2010-2012 - - -# Building - -Building is done with the `go` tool. If you have setup your GOPATH -correctly, the following should work: - - go get github.com/miekg/dns - go build github.com/miekg/dns - -## Examples - -A short "how to use the API" is at the beginning of doc.go (this also will show -when you call `godoc github.com/miekg/dns`). - -Example programs can be found in the `github.com/miekg/exdns` repository. - -## Supported RFCs - -*all of them* - -* 103{4,5} - DNS standard -* 1348 - NSAP record (removed the record) -* 1982 - Serial Arithmetic -* 1876 - LOC record -* 1995 - IXFR -* 1996 - DNS notify -* 2136 - DNS Update (dynamic updates) -* 2181 - RRset definition - there is no RRset type though, just []RR -* 2537 - RSAMD5 DNS keys -* 2065 - DNSSEC (updated in later RFCs) -* 2671 - EDNS record -* 2782 - SRV record -* 2845 - TSIG record -* 2915 - NAPTR record -* 2929 - DNS IANA Considerations -* 3110 - RSASHA1 DNS keys -* 3225 - DO bit (DNSSEC OK) -* 340{1,2,3} - NAPTR record -* 3445 - Limiting the scope of (DNS)KEY -* 3597 - Unknown RRs -* 403{3,4,5} - DNSSEC + validation functions -* 4255 - SSHFP record -* 4343 - Case insensitivity -* 4408 - SPF record -* 4509 - SHA256 Hash in DS -* 4592 - Wildcards in the DNS -* 4635 - HMAC SHA TSIG -* 4701 - DHCID -* 4892 - id.server -* 5001 - NSID -* 5155 - NSEC3 record -* 5205 - HIP record -* 5702 - SHA2 in the DNS -* 5936 - AXFR -* 5966 - TCP implementation recommendations -* 6605 - ECDSA -* 6725 - IANA Registry Update -* 6742 - ILNP DNS -* 6840 - Clarifications and Implementation Notes for DNS Security -* 6844 - CAA record -* 6891 - EDNS0 update -* 6895 - DNS IANA considerations -* 6975 - Algorithm Understanding in DNSSEC -* 7043 - EUI48/EUI64 records -* 7314 - DNS (EDNS) EXPIRE Option -* 7553 - URI record -* 7858 - DNS over TLS: Initiation and Performance Considerations (draft) -* 7873 - Domain Name System (DNS) Cookies (draft-ietf-dnsop-cookies) -* xxxx - EDNS0 DNS Update Lease (draft) - -## Loosely based upon - -* `ldns` -* `NSD` -* `Net::DNS` -* `GRONG` diff --git a/vendor/github.com/miekg/dns/client.go b/vendor/github.com/miekg/dns/client.go deleted file mode 100644 index 0db7f7bf..00000000 --- a/vendor/github.com/miekg/dns/client.go +++ /dev/null @@ -1,455 +0,0 @@ -package dns - -// A client implementation. - -import ( - "bytes" - "crypto/tls" - "encoding/binary" - "io" - "net" - "time" -) - -const dnsTimeout time.Duration = 2 * time.Second -const tcpIdleTimeout time.Duration = 8 * time.Second - -// A Conn represents a connection to a DNS server. 
-type Conn struct { - net.Conn // a net.Conn holding the connection - UDPSize uint16 // minimum receive buffer for UDP messages - TsigSecret map[string]string // secret(s) for Tsig map[], zonename must be fully qualified - rtt time.Duration - t time.Time - tsigRequestMAC string -} - -// A Client defines parameters for a DNS client. -type Client struct { - Net string // if "tcp" or "tcp-tls" (DNS over TLS) a TCP query will be initiated, otherwise an UDP one (default is "" for UDP) - UDPSize uint16 // minimum receive buffer for UDP messages - TLSConfig *tls.Config // TLS connection configuration - Timeout time.Duration // a cumulative timeout for dial, write and read, defaults to 0 (disabled) - overrides DialTimeout, ReadTimeout and WriteTimeout when non-zero - DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds - overridden by Timeout when that value is non-zero - ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero - WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero - TsigSecret map[string]string // secret(s) for Tsig map[], zonename must be fully qualified - SingleInflight bool // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass - group singleflight -} - -// Exchange performs a synchronous UDP query. It sends the message m to the address -// contained in a and waits for a reply. Exchange does not retry a failed query, nor -// will it fall back to TCP in case of truncation. -// See client.Exchange for more information on setting larger buffer sizes. -func Exchange(m *Msg, a string) (r *Msg, err error) { - var co *Conn - co, err = DialTimeout("udp", a, dnsTimeout) - if err != nil { - return nil, err - } - - defer co.Close() - - opt := m.IsEdns0() - // If EDNS0 is used use that for size. - if opt != nil && opt.UDPSize() >= MinMsgSize { - co.UDPSize = opt.UDPSize() - } - - co.SetWriteDeadline(time.Now().Add(dnsTimeout)) - if err = co.WriteMsg(m); err != nil { - return nil, err - } - - co.SetReadDeadline(time.Now().Add(dnsTimeout)) - r, err = co.ReadMsg() - if err == nil && r.Id != m.Id { - err = ErrId - } - return r, err -} - -// ExchangeConn performs a synchronous query. It sends the message m via the connection -// c and waits for a reply. The connection c is not closed by ExchangeConn. -// This function is going away, but can easily be mimicked: -// -// co := &dns.Conn{Conn: c} // c is your net.Conn -// co.WriteMsg(m) -// in, _ := co.ReadMsg() -// co.Close() -// -func ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) { - println("dns: this function is deprecated") - co := new(Conn) - co.Conn = c - if err = co.WriteMsg(m); err != nil { - return nil, err - } - r, err = co.ReadMsg() - if err == nil && r.Id != m.Id { - err = ErrId - } - return r, err -} - -// Exchange performs a synchronous query. It sends the message m to the address -// contained in a and waits for a reply. Basic use pattern with a *dns.Client: -// -// c := new(dns.Client) -// in, rtt, err := c.Exchange(message, "127.0.0.1:53") -// -// Exchange does not retry a failed query, nor will it fall back to TCP in -// case of truncation. -// It is up to the caller to create a message that allows for larger responses to be -// returned. Specifically this means adding an EDNS0 OPT RR that will advertise a larger -// buffer, see SetEdns0. 
Messsages without an OPT RR will fallback to the historic limit -// of 512 bytes. -func (c *Client) Exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) { - if !c.SingleInflight { - return c.exchange(m, a) - } - // This adds a bunch of garbage, TODO(miek). - t := "nop" - if t1, ok := TypeToString[m.Question[0].Qtype]; ok { - t = t1 - } - cl := "nop" - if cl1, ok := ClassToString[m.Question[0].Qclass]; ok { - cl = cl1 - } - r, rtt, err, shared := c.group.Do(m.Question[0].Name+t+cl, func() (*Msg, time.Duration, error) { - return c.exchange(m, a) - }) - if err != nil { - return r, rtt, err - } - if shared { - return r.Copy(), rtt, nil - } - return r, rtt, nil -} - -func (c *Client) dialTimeout() time.Duration { - if c.Timeout != 0 { - return c.Timeout - } - if c.DialTimeout != 0 { - return c.DialTimeout - } - return dnsTimeout -} - -func (c *Client) readTimeout() time.Duration { - if c.ReadTimeout != 0 { - return c.ReadTimeout - } - return dnsTimeout -} - -func (c *Client) writeTimeout() time.Duration { - if c.WriteTimeout != 0 { - return c.WriteTimeout - } - return dnsTimeout -} - -func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) { - var co *Conn - network := "udp" - tls := false - - switch c.Net { - case "tcp-tls": - network = "tcp" - tls = true - case "tcp4-tls": - network = "tcp4" - tls = true - case "tcp6-tls": - network = "tcp6" - tls = true - default: - if c.Net != "" { - network = c.Net - } - } - - var deadline time.Time - if c.Timeout != 0 { - deadline = time.Now().Add(c.Timeout) - } - - if tls { - co, err = DialTimeoutWithTLS(network, a, c.TLSConfig, c.dialTimeout()) - } else { - co, err = DialTimeout(network, a, c.dialTimeout()) - } - - if err != nil { - return nil, 0, err - } - defer co.Close() - - opt := m.IsEdns0() - // If EDNS0 is used use that for size. - if opt != nil && opt.UDPSize() >= MinMsgSize { - co.UDPSize = opt.UDPSize() - } - // Otherwise use the client's configured UDP size. - if opt == nil && c.UDPSize >= MinMsgSize { - co.UDPSize = c.UDPSize - } - - co.TsigSecret = c.TsigSecret - co.SetWriteDeadline(deadlineOrTimeout(deadline, c.writeTimeout())) - if err = co.WriteMsg(m); err != nil { - return nil, 0, err - } - - co.SetReadDeadline(deadlineOrTimeout(deadline, c.readTimeout())) - r, err = co.ReadMsg() - if err == nil && r.Id != m.Id { - err = ErrId - } - return r, co.rtt, err -} - -// ReadMsg reads a message from the connection co. -// If the received message contains a TSIG record the transaction -// signature is verified. -func (co *Conn) ReadMsg() (*Msg, error) { - p, err := co.ReadMsgHeader(nil) - if err != nil { - return nil, err - } - - m := new(Msg) - if err := m.Unpack(p); err != nil { - // If ErrTruncated was returned, we still want to allow the user to use - // the message, but naively they can just check err if they don't want - // to use a truncated message - if err == ErrTruncated { - return m, err - } - return nil, err - } - if t := m.IsTsig(); t != nil { - if _, ok := co.TsigSecret[t.Hdr.Name]; !ok { - return m, ErrSecret - } - // Need to work on the original message p, as that was used to calculate the tsig. - err = TsigVerify(p, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false) - } - return m, err -} - -// ReadMsgHeader reads a DNS message, parses and populates hdr (when hdr is not nil). -// Returns message as a byte slice to be parsed with Msg.Unpack later on. -// Note that error handling on the message body is not possible as only the header is parsed. 
-func (co *Conn) ReadMsgHeader(hdr *Header) ([]byte, error) { - var ( - p []byte - n int - err error - ) - - switch t := co.Conn.(type) { - case *net.TCPConn, *tls.Conn: - r := t.(io.Reader) - - // First two bytes specify the length of the entire message. - l, err := tcpMsgLen(r) - if err != nil { - return nil, err - } - p = make([]byte, l) - n, err = tcpRead(r, p) - co.rtt = time.Since(co.t) - default: - if co.UDPSize > MinMsgSize { - p = make([]byte, co.UDPSize) - } else { - p = make([]byte, MinMsgSize) - } - n, err = co.Read(p) - co.rtt = time.Since(co.t) - } - - if err != nil { - return nil, err - } else if n < headerSize { - return nil, ErrShortRead - } - - p = p[:n] - if hdr != nil { - dh, _, err := unpackMsgHdr(p, 0) - if err != nil { - return nil, err - } - *hdr = dh - } - return p, err -} - -// tcpMsgLen is a helper func to read first two bytes of stream as uint16 packet length. -func tcpMsgLen(t io.Reader) (int, error) { - p := []byte{0, 0} - n, err := t.Read(p) - if err != nil { - return 0, err - } - if n != 2 { - return 0, ErrShortRead - } - l := binary.BigEndian.Uint16(p) - if l == 0 { - return 0, ErrShortRead - } - return int(l), nil -} - -// tcpRead calls TCPConn.Read enough times to fill allocated buffer. -func tcpRead(t io.Reader, p []byte) (int, error) { - n, err := t.Read(p) - if err != nil { - return n, err - } - for n < len(p) { - j, err := t.Read(p[n:]) - if err != nil { - return n, err - } - n += j - } - return n, err -} - -// Read implements the net.Conn read method. -func (co *Conn) Read(p []byte) (n int, err error) { - if co.Conn == nil { - return 0, ErrConnEmpty - } - if len(p) < 2 { - return 0, io.ErrShortBuffer - } - switch t := co.Conn.(type) { - case *net.TCPConn, *tls.Conn: - r := t.(io.Reader) - - l, err := tcpMsgLen(r) - if err != nil { - return 0, err - } - if l > len(p) { - return int(l), io.ErrShortBuffer - } - return tcpRead(r, p[:l]) - } - // UDP connection - n, err = co.Conn.Read(p) - if err != nil { - return n, err - } - return n, err -} - -// WriteMsg sends a message through the connection co. -// If the message m contains a TSIG record the transaction -// signature is calculated. -func (co *Conn) WriteMsg(m *Msg) (err error) { - var out []byte - if t := m.IsTsig(); t != nil { - mac := "" - if _, ok := co.TsigSecret[t.Hdr.Name]; !ok { - return ErrSecret - } - out, mac, err = TsigGenerate(m, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false) - // Set for the next read, although only used in zone transfers - co.tsigRequestMAC = mac - } else { - out, err = m.Pack() - } - if err != nil { - return err - } - co.t = time.Now() - if _, err = co.Write(out); err != nil { - return err - } - return nil -} - -// Write implements the net.Conn Write method. -func (co *Conn) Write(p []byte) (n int, err error) { - switch t := co.Conn.(type) { - case *net.TCPConn, *tls.Conn: - w := t.(io.Writer) - - lp := len(p) - if lp < 2 { - return 0, io.ErrShortBuffer - } - if lp > MaxMsgSize { - return 0, &Error{err: "message too large"} - } - l := make([]byte, 2, lp+2) - binary.BigEndian.PutUint16(l, uint16(lp)) - p = append(l, p...) - n, err := io.Copy(w, bytes.NewReader(p)) - return int(n), err - } - n, err = co.Conn.(*net.UDPConn).Write(p) - return n, err -} - -// Dial connects to the address on the named network. -func Dial(network, address string) (conn *Conn, err error) { - conn = new(Conn) - conn.Conn, err = net.Dial(network, address) - if err != nil { - return nil, err - } - return conn, nil -} - -// DialTimeout acts like Dial but takes a timeout. 
-func DialTimeout(network, address string, timeout time.Duration) (conn *Conn, err error) { - conn = new(Conn) - conn.Conn, err = net.DialTimeout(network, address, timeout) - if err != nil { - return nil, err - } - return conn, nil -} - -// DialWithTLS connects to the address on the named network with TLS. -func DialWithTLS(network, address string, tlsConfig *tls.Config) (conn *Conn, err error) { - conn = new(Conn) - conn.Conn, err = tls.Dial(network, address, tlsConfig) - if err != nil { - return nil, err - } - return conn, nil -} - -// DialTimeoutWithTLS acts like DialWithTLS but takes a timeout. -func DialTimeoutWithTLS(network, address string, tlsConfig *tls.Config, timeout time.Duration) (conn *Conn, err error) { - var dialer net.Dialer - dialer.Timeout = timeout - - conn = new(Conn) - conn.Conn, err = tls.DialWithDialer(&dialer, network, address, tlsConfig) - if err != nil { - return nil, err - } - return conn, nil -} - -func deadlineOrTimeout(deadline time.Time, timeout time.Duration) time.Time { - if deadline.IsZero() { - return time.Now().Add(timeout) - } - return deadline -} diff --git a/vendor/github.com/miekg/dns/clientconfig.go b/vendor/github.com/miekg/dns/clientconfig.go deleted file mode 100644 index cfa9ad0b..00000000 --- a/vendor/github.com/miekg/dns/clientconfig.go +++ /dev/null @@ -1,99 +0,0 @@ -package dns - -import ( - "bufio" - "os" - "strconv" - "strings" -) - -// ClientConfig wraps the contents of the /etc/resolv.conf file. -type ClientConfig struct { - Servers []string // servers to use - Search []string // suffixes to append to local name - Port string // what port to use - Ndots int // number of dots in name to trigger absolute lookup - Timeout int // seconds before giving up on packet - Attempts int // lost packets before giving up on server, not used in the package dns -} - -// ClientConfigFromFile parses a resolv.conf(5) like file and returns -// a *ClientConfig. -func ClientConfigFromFile(resolvconf string) (*ClientConfig, error) { - file, err := os.Open(resolvconf) - if err != nil { - return nil, err - } - defer file.Close() - c := new(ClientConfig) - scanner := bufio.NewScanner(file) - c.Servers = make([]string, 0) - c.Search = make([]string, 0) - c.Port = "53" - c.Ndots = 1 - c.Timeout = 5 - c.Attempts = 2 - - for scanner.Scan() { - if err := scanner.Err(); err != nil { - return nil, err - } - line := scanner.Text() - f := strings.Fields(line) - if len(f) < 1 { - continue - } - switch f[0] { - case "nameserver": // add one name server - if len(f) > 1 { - // One more check: make sure server name is - // just an IP address. Otherwise we need DNS - // to look it up. 
- name := f[1] - c.Servers = append(c.Servers, name) - } - - case "domain": // set search path to just this domain - if len(f) > 1 { - c.Search = make([]string, 1) - c.Search[0] = f[1] - } else { - c.Search = make([]string, 0) - } - - case "search": // set search path to given servers - c.Search = make([]string, len(f)-1) - for i := 0; i < len(c.Search); i++ { - c.Search[i] = f[i+1] - } - - case "options": // magic options - for i := 1; i < len(f); i++ { - s := f[i] - switch { - case len(s) >= 6 && s[:6] == "ndots:": - n, _ := strconv.Atoi(s[6:]) - if n < 1 { - n = 1 - } - c.Ndots = n - case len(s) >= 8 && s[:8] == "timeout:": - n, _ := strconv.Atoi(s[8:]) - if n < 1 { - n = 1 - } - c.Timeout = n - case len(s) >= 8 && s[:9] == "attempts:": - n, _ := strconv.Atoi(s[9:]) - if n < 1 { - n = 1 - } - c.Attempts = n - case s == "rotate": - /* not imp */ - } - } - } - } - return c, nil -} diff --git a/vendor/github.com/miekg/dns/dane.go b/vendor/github.com/miekg/dns/dane.go deleted file mode 100644 index cdaa833f..00000000 --- a/vendor/github.com/miekg/dns/dane.go +++ /dev/null @@ -1,44 +0,0 @@ -package dns - -import ( - "crypto/sha256" - "crypto/sha512" - "crypto/x509" - "encoding/hex" - "errors" - "io" -) - -// CertificateToDANE converts a certificate to a hex string as used in the TLSA or SMIMEA records. -func CertificateToDANE(selector, matchingType uint8, cert *x509.Certificate) (string, error) { - switch matchingType { - case 0: - switch selector { - case 0: - return hex.EncodeToString(cert.Raw), nil - case 1: - return hex.EncodeToString(cert.RawSubjectPublicKeyInfo), nil - } - case 1: - h := sha256.New() - switch selector { - case 0: - io.WriteString(h, string(cert.Raw)) - return hex.EncodeToString(h.Sum(nil)), nil - case 1: - io.WriteString(h, string(cert.RawSubjectPublicKeyInfo)) - return hex.EncodeToString(h.Sum(nil)), nil - } - case 2: - h := sha512.New() - switch selector { - case 0: - io.WriteString(h, string(cert.Raw)) - return hex.EncodeToString(h.Sum(nil)), nil - case 1: - io.WriteString(h, string(cert.RawSubjectPublicKeyInfo)) - return hex.EncodeToString(h.Sum(nil)), nil - } - } - return "", errors.New("dns: bad MatchingType or Selector") -} diff --git a/vendor/github.com/miekg/dns/defaults.go b/vendor/github.com/miekg/dns/defaults.go deleted file mode 100644 index cf456165..00000000 --- a/vendor/github.com/miekg/dns/defaults.go +++ /dev/null @@ -1,282 +0,0 @@ -package dns - -import ( - "errors" - "net" - "strconv" -) - -const hexDigit = "0123456789abcdef" - -// Everything is assumed in ClassINET. - -// SetReply creates a reply message from a request message. -func (dns *Msg) SetReply(request *Msg) *Msg { - dns.Id = request.Id - dns.RecursionDesired = request.RecursionDesired // Copy rd bit - dns.Response = true - dns.Opcode = OpcodeQuery - dns.Rcode = RcodeSuccess - if len(request.Question) > 0 { - dns.Question = make([]Question, 1) - dns.Question[0] = request.Question[0] - } - return dns -} - -// SetQuestion creates a question message, it sets the Question -// section, generates an Id and sets the RecursionDesired (RD) -// bit to true. -func (dns *Msg) SetQuestion(z string, t uint16) *Msg { - dns.Id = Id() - dns.RecursionDesired = true - dns.Question = make([]Question, 1) - dns.Question[0] = Question{z, t, ClassINET} - return dns -} - -// SetNotify creates a notify message, it sets the Question -// section, generates an Id and sets the Authoritative (AA) -// bit to true. 
-func (dns *Msg) SetNotify(z string) *Msg { - dns.Opcode = OpcodeNotify - dns.Authoritative = true - dns.Id = Id() - dns.Question = make([]Question, 1) - dns.Question[0] = Question{z, TypeSOA, ClassINET} - return dns -} - -// SetRcode creates an error message suitable for the request. -func (dns *Msg) SetRcode(request *Msg, rcode int) *Msg { - dns.SetReply(request) - dns.Rcode = rcode - return dns -} - -// SetRcodeFormatError creates a message with FormError set. -func (dns *Msg) SetRcodeFormatError(request *Msg) *Msg { - dns.Rcode = RcodeFormatError - dns.Opcode = OpcodeQuery - dns.Response = true - dns.Authoritative = false - dns.Id = request.Id - return dns -} - -// SetUpdate makes the message a dynamic update message. It -// sets the ZONE section to: z, TypeSOA, ClassINET. -func (dns *Msg) SetUpdate(z string) *Msg { - dns.Id = Id() - dns.Response = false - dns.Opcode = OpcodeUpdate - dns.Compress = false // BIND9 cannot handle compression - dns.Question = make([]Question, 1) - dns.Question[0] = Question{z, TypeSOA, ClassINET} - return dns -} - -// SetIxfr creates message for requesting an IXFR. -func (dns *Msg) SetIxfr(z string, serial uint32, ns, mbox string) *Msg { - dns.Id = Id() - dns.Question = make([]Question, 1) - dns.Ns = make([]RR, 1) - s := new(SOA) - s.Hdr = RR_Header{z, TypeSOA, ClassINET, defaultTtl, 0} - s.Serial = serial - s.Ns = ns - s.Mbox = mbox - dns.Question[0] = Question{z, TypeIXFR, ClassINET} - dns.Ns[0] = s - return dns -} - -// SetAxfr creates message for requesting an AXFR. -func (dns *Msg) SetAxfr(z string) *Msg { - dns.Id = Id() - dns.Question = make([]Question, 1) - dns.Question[0] = Question{z, TypeAXFR, ClassINET} - return dns -} - -// SetTsig appends a TSIG RR to the message. -// This is only a skeleton TSIG RR that is added as the last RR in the -// additional section. The Tsig is calculated when the message is being send. -func (dns *Msg) SetTsig(z, algo string, fudge, timesigned int64) *Msg { - t := new(TSIG) - t.Hdr = RR_Header{z, TypeTSIG, ClassANY, 0, 0} - t.Algorithm = algo - t.Fudge = 300 - t.TimeSigned = uint64(timesigned) - t.OrigId = dns.Id - dns.Extra = append(dns.Extra, t) - return dns -} - -// SetEdns0 appends a EDNS0 OPT RR to the message. -// TSIG should always the last RR in a message. -func (dns *Msg) SetEdns0(udpsize uint16, do bool) *Msg { - e := new(OPT) - e.Hdr.Name = "." - e.Hdr.Rrtype = TypeOPT - e.SetUDPSize(udpsize) - if do { - e.SetDo() - } - dns.Extra = append(dns.Extra, e) - return dns -} - -// IsTsig checks if the message has a TSIG record as the last record -// in the additional section. It returns the TSIG record found or nil. -func (dns *Msg) IsTsig() *TSIG { - if len(dns.Extra) > 0 { - if dns.Extra[len(dns.Extra)-1].Header().Rrtype == TypeTSIG { - return dns.Extra[len(dns.Extra)-1].(*TSIG) - } - } - return nil -} - -// IsEdns0 checks if the message has a EDNS0 (OPT) record, any EDNS0 -// record in the additional section will do. It returns the OPT record -// found or nil. -func (dns *Msg) IsEdns0() *OPT { - // EDNS0 is at the end of the additional section, start there. - // We might want to change this to *only* look at the last two - // records. So we see TSIG and/or OPT - this a slightly bigger - // change though. - for i := len(dns.Extra) - 1; i >= 0; i-- { - if dns.Extra[i].Header().Rrtype == TypeOPT { - return dns.Extra[i].(*OPT) - } - } - return nil -} - -// IsDomainName checks if s is a valid domain name, it returns the number of -// labels and true, when a domain name is valid. 
Note that non fully qualified -// domain name is considered valid, in this case the last label is counted in -// the number of labels. When false is returned the number of labels is not -// defined. Also note that this function is extremely liberal; almost any -// string is a valid domain name as the DNS is 8 bit protocol. It checks if each -// label fits in 63 characters, but there is no length check for the entire -// string s. I.e. a domain name longer than 255 characters is considered valid. -func IsDomainName(s string) (labels int, ok bool) { - _, labels, err := packDomainName(s, nil, 0, nil, false) - return labels, err == nil -} - -// IsSubDomain checks if child is indeed a child of the parent. If child and parent -// are the same domain true is returned as well. -func IsSubDomain(parent, child string) bool { - // Entire child is contained in parent - return CompareDomainName(parent, child) == CountLabel(parent) -} - -// IsMsg sanity checks buf and returns an error if it isn't a valid DNS packet. -// The checking is performed on the binary payload. -func IsMsg(buf []byte) error { - // Header - if len(buf) < 12 { - return errors.New("dns: bad message header") - } - // Header: Opcode - // TODO(miek): more checks here, e.g. check all header bits. - return nil -} - -// IsFqdn checks if a domain name is fully qualified. -func IsFqdn(s string) bool { - l := len(s) - if l == 0 { - return false - } - return s[l-1] == '.' -} - -// IsRRset checks if a set of RRs is a valid RRset as defined by RFC 2181. -// This means the RRs need to have the same type, name, and class. Returns true -// if the RR set is valid, otherwise false. -func IsRRset(rrset []RR) bool { - if len(rrset) == 0 { - return false - } - if len(rrset) == 1 { - return true - } - rrHeader := rrset[0].Header() - rrType := rrHeader.Rrtype - rrClass := rrHeader.Class - rrName := rrHeader.Name - - for _, rr := range rrset[1:] { - curRRHeader := rr.Header() - if curRRHeader.Rrtype != rrType || curRRHeader.Class != rrClass || curRRHeader.Name != rrName { - // Mismatch between the records, so this is not a valid rrset for - //signing/verifying - return false - } - } - - return true -} - -// Fqdn return the fully qualified domain name from s. -// If s is already fully qualified, it behaves as the identity function. -func Fqdn(s string) string { - if IsFqdn(s) { - return s - } - return s + "." -} - -// Copied from the official Go code. - -// ReverseAddr returns the in-addr.arpa. or ip6.arpa. hostname of the IP -// address suitable for reverse DNS (PTR) record lookups or an error if it fails -// to parse the IP address. -func ReverseAddr(addr string) (arpa string, err error) { - ip := net.ParseIP(addr) - if ip == nil { - return "", &Error{err: "unrecognized address: " + addr} - } - if ip.To4() != nil { - return strconv.Itoa(int(ip[15])) + "." + strconv.Itoa(int(ip[14])) + "." + strconv.Itoa(int(ip[13])) + "." + - strconv.Itoa(int(ip[12])) + ".in-addr.arpa.", nil - } - // Must be IPv6 - buf := make([]byte, 0, len(ip)*4+len("ip6.arpa.")) - // Add it, in reverse, to the buffer - for i := len(ip) - 1; i >= 0; i-- { - v := ip[i] - buf = append(buf, hexDigit[v&0xF]) - buf = append(buf, '.') - buf = append(buf, hexDigit[v>>4]) - buf = append(buf, '.') - } - // Append "ip6.arpa." and return (buf already has the final .) - buf = append(buf, "ip6.arpa."...) - return string(buf), nil -} - -// String returns the string representation for the type t. 
-func (t Type) String() string { - if t1, ok := TypeToString[uint16(t)]; ok { - return t1 - } - return "TYPE" + strconv.Itoa(int(t)) -} - -// String returns the string representation for the class c. -func (c Class) String() string { - if c1, ok := ClassToString[uint16(c)]; ok { - return c1 - } - return "CLASS" + strconv.Itoa(int(c)) -} - -// String returns the string representation for the name n. -func (n Name) String() string { - return sprintName(string(n)) -} diff --git a/vendor/github.com/miekg/dns/dns.go b/vendor/github.com/miekg/dns/dns.go deleted file mode 100644 index b3292287..00000000 --- a/vendor/github.com/miekg/dns/dns.go +++ /dev/null @@ -1,104 +0,0 @@ -package dns - -import "strconv" - -const ( - year68 = 1 << 31 // For RFC1982 (Serial Arithmetic) calculations in 32 bits. - defaultTtl = 3600 // Default internal TTL. - - DefaultMsgSize = 4096 // DefaultMsgSize is the standard default for messages larger than 512 bytes. - MinMsgSize = 512 // MinMsgSize is the minimal size of a DNS packet. - MaxMsgSize = 65535 // MaxMsgSize is the largest possible DNS packet. -) - -// Error represents a DNS error. -type Error struct{ err string } - -func (e *Error) Error() string { - if e == nil { - return "dns: " - } - return "dns: " + e.err -} - -// An RR represents a resource record. -type RR interface { - // Header returns the header of an resource record. The header contains - // everything up to the rdata. - Header() *RR_Header - // String returns the text representation of the resource record. - String() string - - // copy returns a copy of the RR - copy() RR - // len returns the length (in octets) of the uncompressed RR in wire format. - len() int - // pack packs an RR into wire format. - pack([]byte, int, map[string]int, bool) (int, error) -} - -// RR_Header is the header all DNS resource records share. -type RR_Header struct { - Name string `dns:"cdomain-name"` - Rrtype uint16 - Class uint16 - Ttl uint32 - Rdlength uint16 // Length of data after header. -} - -// Header returns itself. This is here to make RR_Header implements the RR interface. -func (h *RR_Header) Header() *RR_Header { return h } - -// Just to implement the RR interface. -func (h *RR_Header) copy() RR { return nil } - -func (h *RR_Header) copyHeader() *RR_Header { - r := new(RR_Header) - r.Name = h.Name - r.Rrtype = h.Rrtype - r.Class = h.Class - r.Ttl = h.Ttl - r.Rdlength = h.Rdlength - return r -} - -func (h *RR_Header) String() string { - var s string - - if h.Rrtype == TypeOPT { - s = ";" - // and maybe other things - } - - s += sprintName(h.Name) + "\t" - s += strconv.FormatInt(int64(h.Ttl), 10) + "\t" - s += Class(h.Class).String() + "\t" - s += Type(h.Rrtype).String() + "\t" - return s -} - -func (h *RR_Header) len() int { - l := len(h.Name) + 1 - l += 10 // rrtype(2) + class(2) + ttl(4) + rdlength(2) - return l -} - -// ToRFC3597 converts a known RR to the unknown RR representation from RFC 3597. 
-func (rr *RFC3597) ToRFC3597(r RR) error { - buf := make([]byte, r.len()*2) - off, err := PackRR(r, buf, 0, nil, false) - if err != nil { - return err - } - buf = buf[:off] - if int(r.Header().Rdlength) > off { - return ErrBuf - } - - rfc3597, _, err := unpackRFC3597(*r.Header(), buf, off-int(r.Header().Rdlength)) - if err != nil { - return err - } - *rr = *rfc3597.(*RFC3597) - return nil -} diff --git a/vendor/github.com/miekg/dns/dnssec.go b/vendor/github.com/miekg/dns/dnssec.go deleted file mode 100644 index f5f3fbdd..00000000 --- a/vendor/github.com/miekg/dns/dnssec.go +++ /dev/null @@ -1,721 +0,0 @@ -package dns - -import ( - "bytes" - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" - _ "crypto/md5" - "crypto/rand" - "crypto/rsa" - _ "crypto/sha1" - _ "crypto/sha256" - _ "crypto/sha512" - "encoding/asn1" - "encoding/binary" - "encoding/hex" - "math/big" - "sort" - "strings" - "time" -) - -// DNSSEC encryption algorithm codes. -const ( - _ uint8 = iota - RSAMD5 - DH - DSA - _ // Skip 4, RFC 6725, section 2.1 - RSASHA1 - DSANSEC3SHA1 - RSASHA1NSEC3SHA1 - RSASHA256 - _ // Skip 9, RFC 6725, section 2.1 - RSASHA512 - _ // Skip 11, RFC 6725, section 2.1 - ECCGOST - ECDSAP256SHA256 - ECDSAP384SHA384 - INDIRECT uint8 = 252 - PRIVATEDNS uint8 = 253 // Private (experimental keys) - PRIVATEOID uint8 = 254 -) - -// Map for algorithm names. -var AlgorithmToString = map[uint8]string{ - RSAMD5: "RSAMD5", - DH: "DH", - DSA: "DSA", - RSASHA1: "RSASHA1", - DSANSEC3SHA1: "DSA-NSEC3-SHA1", - RSASHA1NSEC3SHA1: "RSASHA1-NSEC3-SHA1", - RSASHA256: "RSASHA256", - RSASHA512: "RSASHA512", - ECCGOST: "ECC-GOST", - ECDSAP256SHA256: "ECDSAP256SHA256", - ECDSAP384SHA384: "ECDSAP384SHA384", - INDIRECT: "INDIRECT", - PRIVATEDNS: "PRIVATEDNS", - PRIVATEOID: "PRIVATEOID", -} - -// Map of algorithm strings. -var StringToAlgorithm = reverseInt8(AlgorithmToString) - -// Map of algorithm crypto hashes. -var AlgorithmToHash = map[uint8]crypto.Hash{ - RSAMD5: crypto.MD5, // Deprecated in RFC 6725 - RSASHA1: crypto.SHA1, - RSASHA1NSEC3SHA1: crypto.SHA1, - RSASHA256: crypto.SHA256, - ECDSAP256SHA256: crypto.SHA256, - ECDSAP384SHA384: crypto.SHA384, - RSASHA512: crypto.SHA512, -} - -// DNSSEC hashing algorithm codes. -const ( - _ uint8 = iota - SHA1 // RFC 4034 - SHA256 // RFC 4509 - GOST94 // RFC 5933 - SHA384 // Experimental - SHA512 // Experimental -) - -// Map for hash names. -var HashToString = map[uint8]string{ - SHA1: "SHA1", - SHA256: "SHA256", - GOST94: "GOST94", - SHA384: "SHA384", - SHA512: "SHA512", -} - -// Map of hash strings. -var StringToHash = reverseInt8(HashToString) - -// DNSKEY flag values. -const ( - SEP = 1 - REVOKE = 1 << 7 - ZONE = 1 << 8 -) - -// The RRSIG needs to be converted to wireformat with some of the rdata (the signature) missing. -type rrsigWireFmt struct { - TypeCovered uint16 - Algorithm uint8 - Labels uint8 - OrigTtl uint32 - Expiration uint32 - Inception uint32 - KeyTag uint16 - SignerName string `dns:"domain-name"` - /* No Signature */ -} - -// Used for converting DNSKEY's rdata to wirefmt. -type dnskeyWireFmt struct { - Flags uint16 - Protocol uint8 - Algorithm uint8 - PublicKey string `dns:"base64"` - /* Nothing is left out */ -} - -func divRoundUp(a, b int) int { - return (a + b - 1) / b -} - -// KeyTag calculates the keytag (or key-id) of the DNSKEY. -func (k *DNSKEY) KeyTag() uint16 { - if k == nil { - return 0 - } - var keytag int - switch k.Algorithm { - case RSAMD5: - // Look at the bottom two bytes of the modules, which the last - // item in the pubkey. 
We could do this faster by looking directly - // at the base64 values. But I'm lazy. - modulus, _ := fromBase64([]byte(k.PublicKey)) - if len(modulus) > 1 { - x := binary.BigEndian.Uint16(modulus[len(modulus)-2:]) - keytag = int(x) - } - default: - keywire := new(dnskeyWireFmt) - keywire.Flags = k.Flags - keywire.Protocol = k.Protocol - keywire.Algorithm = k.Algorithm - keywire.PublicKey = k.PublicKey - wire := make([]byte, DefaultMsgSize) - n, err := packKeyWire(keywire, wire) - if err != nil { - return 0 - } - wire = wire[:n] - for i, v := range wire { - if i&1 != 0 { - keytag += int(v) // must be larger than uint32 - } else { - keytag += int(v) << 8 - } - } - keytag += (keytag >> 16) & 0xFFFF - keytag &= 0xFFFF - } - return uint16(keytag) -} - -// ToDS converts a DNSKEY record to a DS record. -func (k *DNSKEY) ToDS(h uint8) *DS { - if k == nil { - return nil - } - ds := new(DS) - ds.Hdr.Name = k.Hdr.Name - ds.Hdr.Class = k.Hdr.Class - ds.Hdr.Rrtype = TypeDS - ds.Hdr.Ttl = k.Hdr.Ttl - ds.Algorithm = k.Algorithm - ds.DigestType = h - ds.KeyTag = k.KeyTag() - - keywire := new(dnskeyWireFmt) - keywire.Flags = k.Flags - keywire.Protocol = k.Protocol - keywire.Algorithm = k.Algorithm - keywire.PublicKey = k.PublicKey - wire := make([]byte, DefaultMsgSize) - n, err := packKeyWire(keywire, wire) - if err != nil { - return nil - } - wire = wire[:n] - - owner := make([]byte, 255) - off, err1 := PackDomainName(strings.ToLower(k.Hdr.Name), owner, 0, nil, false) - if err1 != nil { - return nil - } - owner = owner[:off] - // RFC4034: - // digest = digest_algorithm( DNSKEY owner name | DNSKEY RDATA); - // "|" denotes concatenation - // DNSKEY RDATA = Flags | Protocol | Algorithm | Public Key. - - // digest buffer - digest := append(owner, wire...) // another copy - - var hash crypto.Hash - switch h { - case SHA1: - hash = crypto.SHA1 - case SHA256: - hash = crypto.SHA256 - case SHA384: - hash = crypto.SHA384 - case SHA512: - hash = crypto.SHA512 - default: - return nil - } - - s := hash.New() - s.Write(digest) - ds.Digest = hex.EncodeToString(s.Sum(nil)) - return ds -} - -// ToCDNSKEY converts a DNSKEY record to a CDNSKEY record. -func (k *DNSKEY) ToCDNSKEY() *CDNSKEY { - c := &CDNSKEY{DNSKEY: *k} - c.Hdr = *k.Hdr.copyHeader() - c.Hdr.Rrtype = TypeCDNSKEY - return c -} - -// ToCDS converts a DS record to a CDS record. -func (d *DS) ToCDS() *CDS { - c := &CDS{DS: *d} - c.Hdr = *d.Hdr.copyHeader() - c.Hdr.Rrtype = TypeCDS - return c -} - -// Sign signs an RRSet. The signature needs to be filled in with the values: -// Inception, Expiration, KeyTag, SignerName and Algorithm. The rest is copied -// from the RRset. Sign returns a non-nill error when the signing went OK. -// There is no check if RRSet is a proper (RFC 2181) RRSet. If OrigTTL is non -// zero, it is used as-is, otherwise the TTL of the RRset is used as the -// OrigTTL. 
-func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error { - if k == nil { - return ErrPrivKey - } - // s.Inception and s.Expiration may be 0 (rollover etc.), the rest must be set - if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { - return ErrKey - } - - rr.Hdr.Rrtype = TypeRRSIG - rr.Hdr.Name = rrset[0].Header().Name - rr.Hdr.Class = rrset[0].Header().Class - if rr.OrigTtl == 0 { // If set don't override - rr.OrigTtl = rrset[0].Header().Ttl - } - rr.TypeCovered = rrset[0].Header().Rrtype - rr.Labels = uint8(CountLabel(rrset[0].Header().Name)) - - if strings.HasPrefix(rrset[0].Header().Name, "*") { - rr.Labels-- // wildcard, remove from label count - } - - sigwire := new(rrsigWireFmt) - sigwire.TypeCovered = rr.TypeCovered - sigwire.Algorithm = rr.Algorithm - sigwire.Labels = rr.Labels - sigwire.OrigTtl = rr.OrigTtl - sigwire.Expiration = rr.Expiration - sigwire.Inception = rr.Inception - sigwire.KeyTag = rr.KeyTag - // For signing, lowercase this name - sigwire.SignerName = strings.ToLower(rr.SignerName) - - // Create the desired binary blob - signdata := make([]byte, DefaultMsgSize) - n, err := packSigWire(sigwire, signdata) - if err != nil { - return err - } - signdata = signdata[:n] - wire, err := rawSignatureData(rrset, rr) - if err != nil { - return err - } - signdata = append(signdata, wire...) - - hash, ok := AlgorithmToHash[rr.Algorithm] - if !ok { - return ErrAlg - } - - h := hash.New() - h.Write(signdata) - - signature, err := sign(k, h.Sum(nil), hash, rr.Algorithm) - if err != nil { - return err - } - - rr.Signature = toBase64(signature) - - return nil -} - -func sign(k crypto.Signer, hashed []byte, hash crypto.Hash, alg uint8) ([]byte, error) { - signature, err := k.Sign(rand.Reader, hashed, hash) - if err != nil { - return nil, err - } - - switch alg { - case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512: - return signature, nil - - case ECDSAP256SHA256, ECDSAP384SHA384: - ecdsaSignature := &struct { - R, S *big.Int - }{} - if _, err := asn1.Unmarshal(signature, ecdsaSignature); err != nil { - return nil, err - } - - var intlen int - switch alg { - case ECDSAP256SHA256: - intlen = 32 - case ECDSAP384SHA384: - intlen = 48 - } - - signature := intToBytes(ecdsaSignature.R, intlen) - signature = append(signature, intToBytes(ecdsaSignature.S, intlen)...) - return signature, nil - - // There is no defined interface for what a DSA backed crypto.Signer returns - case DSA, DSANSEC3SHA1: - // t := divRoundUp(divRoundUp(p.PublicKey.Y.BitLen(), 8)-64, 8) - // signature := []byte{byte(t)} - // signature = append(signature, intToBytes(r1, 20)...) - // signature = append(signature, intToBytes(s1, 20)...) - // rr.Signature = signature - } - - return nil, ErrAlg -} - -// Verify validates an RRSet with the signature and key. This is only the -// cryptographic test, the signature validity period must be checked separately. -// This function copies the rdata of some RRs (to lowercase domain names) for the validation to work. -func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error { - // First the easy checks - if !IsRRset(rrset) { - return ErrRRset - } - if rr.KeyTag != k.KeyTag() { - return ErrKey - } - if rr.Hdr.Class != k.Hdr.Class { - return ErrKey - } - if rr.Algorithm != k.Algorithm { - return ErrKey - } - if strings.ToLower(rr.SignerName) != strings.ToLower(k.Hdr.Name) { - return ErrKey - } - if k.Protocol != 3 { - return ErrKey - } - - // IsRRset checked that we have at least one RR and that the RRs in - // the set have consistent type, class, and name. 
Also check that the type and
-	// class match the RRSIG record.
-	if rrset[0].Header().Class != rr.Hdr.Class {
-		return ErrRRset
-	}
-	if rrset[0].Header().Rrtype != rr.TypeCovered {
-		return ErrRRset
-	}
-
-	// RFC 4035 5.3.2. Reconstructing the Signed Data
-	// Copy the sig, except the rrsig data
-	sigwire := new(rrsigWireFmt)
-	sigwire.TypeCovered = rr.TypeCovered
-	sigwire.Algorithm = rr.Algorithm
-	sigwire.Labels = rr.Labels
-	sigwire.OrigTtl = rr.OrigTtl
-	sigwire.Expiration = rr.Expiration
-	sigwire.Inception = rr.Inception
-	sigwire.KeyTag = rr.KeyTag
-	sigwire.SignerName = strings.ToLower(rr.SignerName)
-	// Create the desired binary blob
-	signeddata := make([]byte, DefaultMsgSize)
-	n, err := packSigWire(sigwire, signeddata)
-	if err != nil {
-		return err
-	}
-	signeddata = signeddata[:n]
-	wire, err := rawSignatureData(rrset, rr)
-	if err != nil {
-		return err
-	}
-	signeddata = append(signeddata, wire...)
-
-	sigbuf := rr.sigBuf() // Get the binary signature data
-	if rr.Algorithm == PRIVATEDNS { // PRIVATEOID
-		// TODO(miek)
-		// remove the domain name and assume it's ours?
-	}
-
-	hash, ok := AlgorithmToHash[rr.Algorithm]
-	if !ok {
-		return ErrAlg
-	}
-
-	switch rr.Algorithm {
-	case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512, RSAMD5:
-		// TODO(mg): this can be done quicker, i.e. cache the pubkey data somewhere??
-		pubkey := k.publicKeyRSA() // Get the key
-		if pubkey == nil {
-			return ErrKey
-		}
-
-		h := hash.New()
-		h.Write(signeddata)
-		return rsa.VerifyPKCS1v15(pubkey, hash, h.Sum(nil), sigbuf)
-
-	case ECDSAP256SHA256, ECDSAP384SHA384:
-		pubkey := k.publicKeyECDSA()
-		if pubkey == nil {
-			return ErrKey
-		}
-
-		// Split sigbuf into the r and s coordinates
-		r := new(big.Int).SetBytes(sigbuf[:len(sigbuf)/2])
-		s := new(big.Int).SetBytes(sigbuf[len(sigbuf)/2:])
-
-		h := hash.New()
-		h.Write(signeddata)
-		if ecdsa.Verify(pubkey, h.Sum(nil), r, s) {
-			return nil
-		}
-		return ErrSig
-
-	default:
-		return ErrAlg
-	}
-}
-
-// ValidityPeriod uses RFC1982 serial arithmetic to calculate
-// if a signature period is valid. If t is the zero time, the
-// current time is used instead. Returns true if the signature
-// is valid at the given time, otherwise returns false.
-func (rr *RRSIG) ValidityPeriod(t time.Time) bool {
-	var utc int64
-	if t.IsZero() {
-		utc = time.Now().UTC().Unix()
-	} else {
-		utc = t.UTC().Unix()
-	}
-	modi := (int64(rr.Inception) - utc) / year68
-	mode := (int64(rr.Expiration) - utc) / year68
-	ti := int64(rr.Inception) + (modi * year68)
-	te := int64(rr.Expiration) + (mode * year68)
-	return ti <= utc && utc <= te
-}
-
-// Return the signature's base64-encoded sigdata as a decoded byte slice.
-func (rr *RRSIG) sigBuf() []byte {
-	sigbuf, err := fromBase64([]byte(rr.Signature))
-	if err != nil {
-		return nil
-	}
-	return sigbuf
-}
-
-// publicKeyRSA returns the RSA public key from a DNSKEY record.
-func (k *DNSKEY) publicKeyRSA() *rsa.PublicKey {
-	keybuf, err := fromBase64([]byte(k.PublicKey))
-	if err != nil {
-		return nil
-	}
-
-	// RFC 2537/3110, section 2.
RSA Public KEY Resource Records - // Length is in the 0th byte, unless its zero, then it - // it in bytes 1 and 2 and its a 16 bit number - explen := uint16(keybuf[0]) - keyoff := 1 - if explen == 0 { - explen = uint16(keybuf[1])<<8 | uint16(keybuf[2]) - keyoff = 3 - } - pubkey := new(rsa.PublicKey) - - pubkey.N = big.NewInt(0) - shift := uint64((explen - 1) * 8) - expo := uint64(0) - for i := int(explen - 1); i > 0; i-- { - expo += uint64(keybuf[keyoff+i]) << shift - shift -= 8 - } - // Remainder - expo += uint64(keybuf[keyoff]) - if expo > 2<<31 { - // Larger expo than supported. - // println("dns: F5 primes (or larger) are not supported") - return nil - } - pubkey.E = int(expo) - - pubkey.N.SetBytes(keybuf[keyoff+int(explen):]) - return pubkey -} - -// publicKeyECDSA returns the Curve public key from the DNSKEY record. -func (k *DNSKEY) publicKeyECDSA() *ecdsa.PublicKey { - keybuf, err := fromBase64([]byte(k.PublicKey)) - if err != nil { - return nil - } - pubkey := new(ecdsa.PublicKey) - switch k.Algorithm { - case ECDSAP256SHA256: - pubkey.Curve = elliptic.P256() - if len(keybuf) != 64 { - // wrongly encoded key - return nil - } - case ECDSAP384SHA384: - pubkey.Curve = elliptic.P384() - if len(keybuf) != 96 { - // Wrongly encoded key - return nil - } - } - pubkey.X = big.NewInt(0) - pubkey.X.SetBytes(keybuf[:len(keybuf)/2]) - pubkey.Y = big.NewInt(0) - pubkey.Y.SetBytes(keybuf[len(keybuf)/2:]) - return pubkey -} - -func (k *DNSKEY) publicKeyDSA() *dsa.PublicKey { - keybuf, err := fromBase64([]byte(k.PublicKey)) - if err != nil { - return nil - } - if len(keybuf) < 22 { - return nil - } - t, keybuf := int(keybuf[0]), keybuf[1:] - size := 64 + t*8 - q, keybuf := keybuf[:20], keybuf[20:] - if len(keybuf) != 3*size { - return nil - } - p, keybuf := keybuf[:size], keybuf[size:] - g, y := keybuf[:size], keybuf[size:] - pubkey := new(dsa.PublicKey) - pubkey.Parameters.Q = big.NewInt(0).SetBytes(q) - pubkey.Parameters.P = big.NewInt(0).SetBytes(p) - pubkey.Parameters.G = big.NewInt(0).SetBytes(g) - pubkey.Y = big.NewInt(0).SetBytes(y) - return pubkey -} - -type wireSlice [][]byte - -func (p wireSlice) Len() int { return len(p) } -func (p wireSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } -func (p wireSlice) Less(i, j int) bool { - _, ioff, _ := UnpackDomainName(p[i], 0) - _, joff, _ := UnpackDomainName(p[j], 0) - return bytes.Compare(p[i][ioff+10:], p[j][joff+10:]) < 0 -} - -// Return the raw signature data. -func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) { - wires := make(wireSlice, len(rrset)) - for i, r := range rrset { - r1 := r.copy() - r1.Header().Ttl = s.OrigTtl - labels := SplitDomainName(r1.Header().Name) - // 6.2. Canonical RR Form. (4) - wildcards - if len(labels) > int(s.Labels) { - // Wildcard - r1.Header().Name = "*." + strings.Join(labels[len(labels)-int(s.Labels):], ".") + "." - } - // RFC 4034: 6.2. Canonical RR Form. (2) - domain name to lowercase - r1.Header().Name = strings.ToLower(r1.Header().Name) - // 6.2. Canonical RR Form. (3) - domain rdata to lowercase. - // NS, MD, MF, CNAME, SOA, MB, MG, MR, PTR, - // HINFO, MINFO, MX, RP, AFSDB, RT, SIG, PX, NXT, NAPTR, KX, - // SRV, DNAME, A6 - // - // RFC 6840 - Clarifications and Implementation Notes for DNS Security (DNSSEC): - // Section 6.2 of [RFC4034] also erroneously lists HINFO as a record - // that needs conversion to lowercase, and twice at that. Since HINFO - // records contain no domain names, they are not subject to case - // conversion. 
- switch x := r1.(type) { - case *NS: - x.Ns = strings.ToLower(x.Ns) - case *CNAME: - x.Target = strings.ToLower(x.Target) - case *SOA: - x.Ns = strings.ToLower(x.Ns) - x.Mbox = strings.ToLower(x.Mbox) - case *MB: - x.Mb = strings.ToLower(x.Mb) - case *MG: - x.Mg = strings.ToLower(x.Mg) - case *MR: - x.Mr = strings.ToLower(x.Mr) - case *PTR: - x.Ptr = strings.ToLower(x.Ptr) - case *MINFO: - x.Rmail = strings.ToLower(x.Rmail) - x.Email = strings.ToLower(x.Email) - case *MX: - x.Mx = strings.ToLower(x.Mx) - case *NAPTR: - x.Replacement = strings.ToLower(x.Replacement) - case *KX: - x.Exchanger = strings.ToLower(x.Exchanger) - case *SRV: - x.Target = strings.ToLower(x.Target) - case *DNAME: - x.Target = strings.ToLower(x.Target) - } - // 6.2. Canonical RR Form. (5) - origTTL - wire := make([]byte, r1.len()+1) // +1 to be safe(r) - off, err1 := PackRR(r1, wire, 0, nil, false) - if err1 != nil { - return nil, err1 - } - wire = wire[:off] - wires[i] = wire - } - sort.Sort(wires) - for i, wire := range wires { - if i > 0 && bytes.Equal(wire, wires[i-1]) { - continue - } - buf = append(buf, wire...) - } - return buf, nil -} - -func packSigWire(sw *rrsigWireFmt, msg []byte) (int, error) { - // copied from zmsg.go RRSIG packing - off, err := packUint16(sw.TypeCovered, msg, 0) - if err != nil { - return off, err - } - off, err = packUint8(sw.Algorithm, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(sw.Labels, msg, off) - if err != nil { - return off, err - } - off, err = packUint32(sw.OrigTtl, msg, off) - if err != nil { - return off, err - } - off, err = packUint32(sw.Expiration, msg, off) - if err != nil { - return off, err - } - off, err = packUint32(sw.Inception, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(sw.KeyTag, msg, off) - if err != nil { - return off, err - } - off, err = PackDomainName(sw.SignerName, msg, off, nil, false) - if err != nil { - return off, err - } - return off, nil -} - -func packKeyWire(dw *dnskeyWireFmt, msg []byte) (int, error) { - // copied from zmsg.go DNSKEY packing - off, err := packUint16(dw.Flags, msg, 0) - if err != nil { - return off, err - } - off, err = packUint8(dw.Protocol, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(dw.Algorithm, msg, off) - if err != nil { - return off, err - } - off, err = packStringBase64(dw.PublicKey, msg, off) - if err != nil { - return off, err - } - return off, nil -} diff --git a/vendor/github.com/miekg/dns/dnssec_keygen.go b/vendor/github.com/miekg/dns/dnssec_keygen.go deleted file mode 100644 index 229a0793..00000000 --- a/vendor/github.com/miekg/dns/dnssec_keygen.go +++ /dev/null @@ -1,156 +0,0 @@ -package dns - -import ( - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "math/big" -) - -// Generate generates a DNSKEY of the given bit size. -// The public part is put inside the DNSKEY record. -// The Algorithm in the key must be set as this will define -// what kind of DNSKEY will be generated. -// The ECDSA algorithms imply a fixed keysize, in that case -// bits should be set to the size of the algorithm. 
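// A hedged sketch of driving Generate (the owner name and flags below are
// illustrative values, not taken from this file):
//
//	k := new(DNSKEY)
//	k.Hdr = RR_Header{Name: "example.org.", Rrtype: TypeDNSKEY, Class: ClassINET, Ttl: 3600}
//	k.Flags = 256 // 256 marks a zone-signing key, 257 a key-signing key
//	k.Protocol = 3
//	k.Algorithm = ECDSAP256SHA256
//	priv, err := k.Generate(256) // for ECDSA the bit size must match the curve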
-func (k *DNSKEY) Generate(bits int) (crypto.PrivateKey, error) { - switch k.Algorithm { - case DSA, DSANSEC3SHA1: - if bits != 1024 { - return nil, ErrKeySize - } - case RSAMD5, RSASHA1, RSASHA256, RSASHA1NSEC3SHA1: - if bits < 512 || bits > 4096 { - return nil, ErrKeySize - } - case RSASHA512: - if bits < 1024 || bits > 4096 { - return nil, ErrKeySize - } - case ECDSAP256SHA256: - if bits != 256 { - return nil, ErrKeySize - } - case ECDSAP384SHA384: - if bits != 384 { - return nil, ErrKeySize - } - } - - switch k.Algorithm { - case DSA, DSANSEC3SHA1: - params := new(dsa.Parameters) - if err := dsa.GenerateParameters(params, rand.Reader, dsa.L1024N160); err != nil { - return nil, err - } - priv := new(dsa.PrivateKey) - priv.PublicKey.Parameters = *params - err := dsa.GenerateKey(priv, rand.Reader) - if err != nil { - return nil, err - } - k.setPublicKeyDSA(params.Q, params.P, params.G, priv.PublicKey.Y) - return priv, nil - case RSAMD5, RSASHA1, RSASHA256, RSASHA512, RSASHA1NSEC3SHA1: - priv, err := rsa.GenerateKey(rand.Reader, bits) - if err != nil { - return nil, err - } - k.setPublicKeyRSA(priv.PublicKey.E, priv.PublicKey.N) - return priv, nil - case ECDSAP256SHA256, ECDSAP384SHA384: - var c elliptic.Curve - switch k.Algorithm { - case ECDSAP256SHA256: - c = elliptic.P256() - case ECDSAP384SHA384: - c = elliptic.P384() - } - priv, err := ecdsa.GenerateKey(c, rand.Reader) - if err != nil { - return nil, err - } - k.setPublicKeyECDSA(priv.PublicKey.X, priv.PublicKey.Y) - return priv, nil - default: - return nil, ErrAlg - } -} - -// Set the public key (the value E and N) -func (k *DNSKEY) setPublicKeyRSA(_E int, _N *big.Int) bool { - if _E == 0 || _N == nil { - return false - } - buf := exponentToBuf(_E) - buf = append(buf, _N.Bytes()...) - k.PublicKey = toBase64(buf) - return true -} - -// Set the public key for Elliptic Curves -func (k *DNSKEY) setPublicKeyECDSA(_X, _Y *big.Int) bool { - if _X == nil || _Y == nil { - return false - } - var intlen int - switch k.Algorithm { - case ECDSAP256SHA256: - intlen = 32 - case ECDSAP384SHA384: - intlen = 48 - } - k.PublicKey = toBase64(curveToBuf(_X, _Y, intlen)) - return true -} - -// Set the public key for DSA -func (k *DNSKEY) setPublicKeyDSA(_Q, _P, _G, _Y *big.Int) bool { - if _Q == nil || _P == nil || _G == nil || _Y == nil { - return false - } - buf := dsaToBuf(_Q, _P, _G, _Y) - k.PublicKey = toBase64(buf) - return true -} - -// Set the public key (the values E and N) for RSA -// RFC 3110: Section 2. RSA Public KEY Resource Records -func exponentToBuf(_E int) []byte { - var buf []byte - i := big.NewInt(int64(_E)) - if len(i.Bytes()) < 256 { - buf = make([]byte, 1) - buf[0] = uint8(len(i.Bytes())) - } else { - buf = make([]byte, 3) - buf[0] = 0 - buf[1] = uint8(len(i.Bytes()) >> 8) - buf[2] = uint8(len(i.Bytes())) - } - buf = append(buf, i.Bytes()...) - return buf -} - -// Set the public key for X and Y for Curve. The two -// values are just concatenated. -func curveToBuf(_X, _Y *big.Int, intlen int) []byte { - buf := intToBytes(_X, intlen) - buf = append(buf, intToBytes(_Y, intlen)...) - return buf -} - -// Set the public key for X and Y for Curve. The two -// values are just concatenated. -func dsaToBuf(_Q, _P, _G, _Y *big.Int) []byte { - t := divRoundUp(divRoundUp(_G.BitLen(), 8)-64, 8) - buf := []byte{byte(t)} - buf = append(buf, intToBytes(_Q, 20)...) - buf = append(buf, intToBytes(_P, 64+t*8)...) - buf = append(buf, intToBytes(_G, 64+t*8)...) - buf = append(buf, intToBytes(_Y, 64+t*8)...) 
- return buf -} diff --git a/vendor/github.com/miekg/dns/dnssec_keyscan.go b/vendor/github.com/miekg/dns/dnssec_keyscan.go deleted file mode 100644 index 9ff3a617..00000000 --- a/vendor/github.com/miekg/dns/dnssec_keyscan.go +++ /dev/null @@ -1,249 +0,0 @@ -package dns - -import ( - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "crypto/rsa" - "io" - "math/big" - "strconv" - "strings" -) - -// NewPrivateKey returns a PrivateKey by parsing the string s. -// s should be in the same form of the BIND private key files. -func (k *DNSKEY) NewPrivateKey(s string) (crypto.PrivateKey, error) { - if s == "" || s[len(s)-1] != '\n' { // We need a closing newline - return k.ReadPrivateKey(strings.NewReader(s+"\n"), "") - } - return k.ReadPrivateKey(strings.NewReader(s), "") -} - -// ReadPrivateKey reads a private key from the io.Reader q. The string file is -// only used in error reporting. -// The public key must be known, because some cryptographic algorithms embed -// the public inside the privatekey. -func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, error) { - m, err := parseKey(q, file) - if m == nil { - return nil, err - } - if _, ok := m["private-key-format"]; !ok { - return nil, ErrPrivKey - } - if m["private-key-format"] != "v1.2" && m["private-key-format"] != "v1.3" { - return nil, ErrPrivKey - } - // TODO(mg): check if the pubkey matches the private key - algo, err := strconv.Atoi(strings.SplitN(m["algorithm"], " ", 2)[0]) - if err != nil { - return nil, ErrPrivKey - } - switch uint8(algo) { - case DSA: - priv, err := readPrivateKeyDSA(m) - if err != nil { - return nil, err - } - pub := k.publicKeyDSA() - if pub == nil { - return nil, ErrKey - } - priv.PublicKey = *pub - return priv, nil - case RSAMD5: - fallthrough - case RSASHA1: - fallthrough - case RSASHA1NSEC3SHA1: - fallthrough - case RSASHA256: - fallthrough - case RSASHA512: - priv, err := readPrivateKeyRSA(m) - if err != nil { - return nil, err - } - pub := k.publicKeyRSA() - if pub == nil { - return nil, ErrKey - } - priv.PublicKey = *pub - return priv, nil - case ECCGOST: - return nil, ErrPrivKey - case ECDSAP256SHA256: - fallthrough - case ECDSAP384SHA384: - priv, err := readPrivateKeyECDSA(m) - if err != nil { - return nil, err - } - pub := k.publicKeyECDSA() - if pub == nil { - return nil, ErrKey - } - priv.PublicKey = *pub - return priv, nil - default: - return nil, ErrPrivKey - } -} - -// Read a private key (file) string and create a public key. Return the private key. 
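// As a rough sketch of the intended round trip (privkey is a placeholder
// obtained from Generate; PrivateKeyString is defined in dnssec_privkey.go
// below):
//
//	pub := k.String()                   // DNSKEY RR in presentation format
//	priv := k.PrivateKeyString(privkey) // BIND's Private-key-format: v1.3
//	restored, err := k.NewPrivateKey(priv)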
-func readPrivateKeyRSA(m map[string]string) (*rsa.PrivateKey, error) { - p := new(rsa.PrivateKey) - p.Primes = []*big.Int{nil, nil} - for k, v := range m { - switch k { - case "modulus", "publicexponent", "privateexponent", "prime1", "prime2": - v1, err := fromBase64([]byte(v)) - if err != nil { - return nil, err - } - switch k { - case "modulus": - p.PublicKey.N = big.NewInt(0) - p.PublicKey.N.SetBytes(v1) - case "publicexponent": - i := big.NewInt(0) - i.SetBytes(v1) - p.PublicKey.E = int(i.Int64()) // int64 should be large enough - case "privateexponent": - p.D = big.NewInt(0) - p.D.SetBytes(v1) - case "prime1": - p.Primes[0] = big.NewInt(0) - p.Primes[0].SetBytes(v1) - case "prime2": - p.Primes[1] = big.NewInt(0) - p.Primes[1].SetBytes(v1) - } - case "exponent1", "exponent2", "coefficient": - // not used in Go (yet) - case "created", "publish", "activate": - // not used in Go (yet) - } - } - return p, nil -} - -func readPrivateKeyDSA(m map[string]string) (*dsa.PrivateKey, error) { - p := new(dsa.PrivateKey) - p.X = big.NewInt(0) - for k, v := range m { - switch k { - case "private_value(x)": - v1, err := fromBase64([]byte(v)) - if err != nil { - return nil, err - } - p.X.SetBytes(v1) - case "created", "publish", "activate": - /* not used in Go (yet) */ - } - } - return p, nil -} - -func readPrivateKeyECDSA(m map[string]string) (*ecdsa.PrivateKey, error) { - p := new(ecdsa.PrivateKey) - p.D = big.NewInt(0) - // TODO: validate that the required flags are present - for k, v := range m { - switch k { - case "privatekey": - v1, err := fromBase64([]byte(v)) - if err != nil { - return nil, err - } - p.D.SetBytes(v1) - case "created", "publish", "activate": - /* not used in Go (yet) */ - } - } - return p, nil -} - -// parseKey reads a private key from r. It returns a map[string]string, -// with the key-value pairs, or an error when the file is not correct. -func parseKey(r io.Reader, file string) (map[string]string, error) { - s := scanInit(r) - m := make(map[string]string) - c := make(chan lex) - k := "" - // Start the lexer - go klexer(s, c) - for l := range c { - // It should alternate - switch l.value { - case zKey: - k = l.token - case zValue: - if k == "" { - return nil, &ParseError{file, "no private key seen", l} - } - //println("Setting", strings.ToLower(k), "to", l.token, "b") - m[strings.ToLower(k)] = l.token - k = "" - } - } - return m, nil -} - -// klexer scans the sourcefile and returns tokens on the channel c. 
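// For orientation, the BIND-style input that parseKey and klexer handle looks
// roughly like this (values elided; the algorithm line is illustrative):
//
//	Private-key-format: v1.3
//	Algorithm: 13 (ECDSAP256SHA256)
//	PrivateKey: <base64 data>
//
// parseKey lowercases the field names, so lookups use "privatekey",
// "algorithm", and so on.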
-func klexer(s *scan, c chan lex) { - var l lex - str := "" // Hold the current read text - commt := false - key := true - x, err := s.tokenText() - defer close(c) - for err == nil { - l.column = s.position.Column - l.line = s.position.Line - switch x { - case ':': - if commt { - break - } - l.token = str - if key { - l.value = zKey - c <- l - // Next token is a space, eat it - s.tokenText() - key = false - str = "" - } else { - l.value = zValue - } - case ';': - commt = true - case '\n': - if commt { - // Reset a comment - commt = false - } - l.value = zValue - l.token = str - c <- l - str = "" - commt = false - key = true - default: - if commt { - break - } - str += string(x) - } - x, err = s.tokenText() - } - if len(str) > 0 { - // Send remainder - l.token = str - l.value = zValue - c <- l - } -} diff --git a/vendor/github.com/miekg/dns/dnssec_privkey.go b/vendor/github.com/miekg/dns/dnssec_privkey.go deleted file mode 100644 index 56f3ea93..00000000 --- a/vendor/github.com/miekg/dns/dnssec_privkey.go +++ /dev/null @@ -1,85 +0,0 @@ -package dns - -import ( - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "crypto/rsa" - "math/big" - "strconv" -) - -const format = "Private-key-format: v1.3\n" - -// PrivateKeyString converts a PrivateKey to a string. This string has the same -// format as the private-key-file of BIND9 (Private-key-format: v1.3). -// It needs some info from the key (the algorithm), so its a method of the DNSKEY -// It supports rsa.PrivateKey, ecdsa.PrivateKey and dsa.PrivateKey -func (r *DNSKEY) PrivateKeyString(p crypto.PrivateKey) string { - algorithm := strconv.Itoa(int(r.Algorithm)) - algorithm += " (" + AlgorithmToString[r.Algorithm] + ")" - - switch p := p.(type) { - case *rsa.PrivateKey: - modulus := toBase64(p.PublicKey.N.Bytes()) - e := big.NewInt(int64(p.PublicKey.E)) - publicExponent := toBase64(e.Bytes()) - privateExponent := toBase64(p.D.Bytes()) - prime1 := toBase64(p.Primes[0].Bytes()) - prime2 := toBase64(p.Primes[1].Bytes()) - // Calculate Exponent1/2 and Coefficient as per: http://en.wikipedia.org/wiki/RSA#Using_the_Chinese_remainder_algorithm - // and from: http://code.google.com/p/go/issues/detail?id=987 - one := big.NewInt(1) - p1 := big.NewInt(0).Sub(p.Primes[0], one) - q1 := big.NewInt(0).Sub(p.Primes[1], one) - exp1 := big.NewInt(0).Mod(p.D, p1) - exp2 := big.NewInt(0).Mod(p.D, q1) - coeff := big.NewInt(0).ModInverse(p.Primes[1], p.Primes[0]) - - exponent1 := toBase64(exp1.Bytes()) - exponent2 := toBase64(exp2.Bytes()) - coefficient := toBase64(coeff.Bytes()) - - return format + - "Algorithm: " + algorithm + "\n" + - "Modulus: " + modulus + "\n" + - "PublicExponent: " + publicExponent + "\n" + - "PrivateExponent: " + privateExponent + "\n" + - "Prime1: " + prime1 + "\n" + - "Prime2: " + prime2 + "\n" + - "Exponent1: " + exponent1 + "\n" + - "Exponent2: " + exponent2 + "\n" + - "Coefficient: " + coefficient + "\n" - - case *ecdsa.PrivateKey: - var intlen int - switch r.Algorithm { - case ECDSAP256SHA256: - intlen = 32 - case ECDSAP384SHA384: - intlen = 48 - } - private := toBase64(intToBytes(p.D, intlen)) - return format + - "Algorithm: " + algorithm + "\n" + - "PrivateKey: " + private + "\n" - - case *dsa.PrivateKey: - T := divRoundUp(divRoundUp(p.PublicKey.Parameters.G.BitLen(), 8)-64, 8) - prime := toBase64(intToBytes(p.PublicKey.Parameters.P, 64+T*8)) - subprime := toBase64(intToBytes(p.PublicKey.Parameters.Q, 20)) - base := toBase64(intToBytes(p.PublicKey.Parameters.G, 64+T*8)) - priv := toBase64(intToBytes(p.X, 20)) - pub := 
toBase64(intToBytes(p.PublicKey.Y, 64+T*8))
-		return format +
-			"Algorithm: " + algorithm + "\n" +
-			"Prime(p): " + prime + "\n" +
-			"Subprime(q): " + subprime + "\n" +
-			"Base(g): " + base + "\n" +
-			"Private_value(x): " + priv + "\n" +
-			"Public_value(y): " + pub + "\n"
-
-	default:
-		return ""
-	}
-}
diff --git a/vendor/github.com/miekg/dns/doc.go b/vendor/github.com/miekg/dns/doc.go
deleted file mode 100644
index e38753d7..00000000
--- a/vendor/github.com/miekg/dns/doc.go
+++ /dev/null
@@ -1,251 +0,0 @@
-/*
-Package dns implements a full featured interface to the Domain Name System.
-Server- and client-side programming is supported.
-The package allows complete control over what is sent out to the DNS. The package
-API follows the less-is-more principle, by presenting a small, clean interface.
-
-The package dns supports (asynchronous) querying/replying, incoming/outgoing zone transfers,
-TSIG, EDNS0, dynamic updates, notifies and DNSSEC validation/signing.
-Note that domain names MUST be fully qualified before sending them; unqualified
-names in a message will result in a packing failure.
-
-Resource records are native types. They are not stored in wire format.
-Basic usage pattern for creating a new resource record:
-
-	r := new(dns.MX)
-	r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX,
-		Class: dns.ClassINET, Ttl: 3600}
-	r.Preference = 10
-	r.Mx = "mx.miek.nl."
-
-Or directly from a string:
-
-	mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
-
-Or when the default TTL (3600) and class (IN) suit you:
-
-	mx, err := dns.NewRR("miek.nl. MX 10 mx.miek.nl.")
-
-Or even:
-
-	mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek")
-
-In the DNS, messages are exchanged; these messages contain resource
-records (sets). Basic use pattern for creating a message:
-
-	m := new(dns.Msg)
-	m.SetQuestion("miek.nl.", dns.TypeMX)
-
-Or when not certain if the domain name is fully qualified:
-
-	m.SetQuestion(dns.Fqdn("miek.nl"), dns.TypeMX)
-
-The message m is now a message with the question section set to ask
-the MX records for the miek.nl. zone.
-
-The following is slightly more verbose, but more flexible:
-
-	m1 := new(dns.Msg)
-	m1.Id = dns.Id()
-	m1.RecursionDesired = true
-	m1.Question = make([]dns.Question, 1)
-	m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET}
-
-After creating a message it can be sent.
-Basic use pattern for synchronously querying the DNS at a
-server configured on 127.0.0.1 and port 53:
-
-	c := new(dns.Client)
-	in, rtt, err := c.Exchange(m1, "127.0.0.1:53")
-
-Suppressing multiple outstanding queries (with the same question, type and
-class) is as easy as setting:
-
-	c.SingleInflight = true
-
-If these "advanced" features are not needed, a simple UDP query can be sent
-with:
-
-	in, err := dns.Exchange(m1, "127.0.0.1:53")
-
-When this function returns you will get a dns message. A dns message consists
-of four sections.
-The question section: in.Question, the answer section: in.Answer,
-the authority section: in.Ns and the additional section: in.Extra.
-
-Each of these sections (except the Question section) contains a []RR. Basic
-use pattern for accessing the rdata of a TXT RR as the first RR in
-the Answer section:
-
-	if t, ok := in.Answer[0].(*dns.TXT); ok {
-		// do something with t.Txt
-	}
-
-Domain Name and TXT Character String Representations
-
-Both domain names and TXT character strings are converted to presentation
-form both when unpacked and when converted to strings.
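For example (an illustrative value, not part of the original text), a TXT
character string containing a tab and a quotation mark is rendered as:

	"a\tb\"c"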
-
-For TXT character strings, tabs, carriage returns and line feeds will be
-converted to \t, \r and \n respectively. Back slashes and quotation marks
-will be escaped. Bytes below 32 and above 127 will be converted to \DDD
-form.
-
-For domain names, in addition to the above rules, brackets, periods,
-spaces, semicolons and the at symbol are escaped.
-
-DNSSEC
-
-DNSSEC (DNS Security Extensions) adds a layer of security to the DNS. It
-uses public key cryptography to sign resource records. The
-public keys are stored in DNSKEY records and the signatures in RRSIG records.
-
-Requesting DNSSEC information for a zone is done by adding the DO (DNSSEC OK) bit
-to a request.
-
-	m := new(dns.Msg)
-	m.SetEdns0(4096, true)
-
-Signature generation, signature verification and key generation are all supported.
-
-DYNAMIC UPDATES
-
-Dynamic updates reuse the DNS message format, but rename three of
-the sections. Question is Zone, Answer is Prerequisite, Authority is
-Update; only the Additional section is not renamed. See RFC 2136 for the gory details.
-
-You can set a rather complex set of rules for the existence or absence of
-certain resource records or names in a zone to specify if resource records
-should be added or removed. The table from RFC 2136 supplemented with the Go
-DNS functions shows which functions exist to specify the prerequisites.
-
- 3.2.4 - Table Of Metavalues Used In Prerequisite Section
-
-  CLASS    TYPE     RDATA    Meaning                    Function
-  --------------------------------------------------------------
-  ANY      ANY      empty    Name is in use             dns.NameUsed
-  ANY      rrset    empty    RRset exists (value indep) dns.RRsetUsed
-  NONE     ANY      empty    Name is not in use         dns.NameNotUsed
-  NONE     rrset    empty    RRset does not exist       dns.RRsetNotUsed
-  zone     rrset    rr       RRset exists (value dep)   dns.Used
-
-The prerequisite section can also be left empty.
-If you have decided on the prerequisites you can tell what RRs should
-be added or deleted. The next table shows the options you have and
-what functions to call.
-
- 3.4.2.6 - Table Of Metavalues Used In Update Section
-
-  CLASS    TYPE     RDATA    Meaning                     Function
-  ---------------------------------------------------------------
-  ANY      ANY      empty    Delete all RRsets from name dns.RemoveName
-  ANY      rrset    empty    Delete an RRset             dns.RemoveRRset
-  NONE     rrset    rr       Delete an RR from RRset     dns.Remove
-  zone     rrset    rr       Add to an RRset             dns.Insert
-
-TRANSACTION SIGNATURE
-
-A TSIG, or transaction signature, adds an HMAC TSIG record to each message sent.
-The supported algorithms include: HmacMD5, HmacSHA1, HmacSHA256 and HmacSHA512.
-
-Basic use pattern when querying with a TSIG name "axfr." (note that these key names
-must be fully qualified - as they are domain names) and the base64 secret
-"so6ZGir4GPAqINNh9U5c3A==":
-
-	c := new(dns.Client)
-	c.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
-	m := new(dns.Msg)
-	m.SetQuestion("miek.nl.", dns.TypeMX)
-	m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
-	...
-	// When sending the TSIG RR is calculated and filled in before sending
-
-When requesting a zone transfer (almost all TSIG usage is when requesting zone transfers), with
-TSIG, this is the basic use pattern. In this example we request an AXFR for
-miek.nl. with TSIG key named "axfr."
and secret "so6ZGir4GPAqINNh9U5c3A==" -and using the server 176.58.119.54: - - t := new(dns.Transfer) - m := new(dns.Msg) - t.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="} - m.SetAxfr("miek.nl.") - m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix()) - c, err := t.In(m, "176.58.119.54:53") - for r := range c { ... } - -You can now read the records from the transfer as they come in. Each envelope is checked with TSIG. -If something is not correct an error is returned. - -Basic use pattern validating and replying to a message that has TSIG set. - - server := &dns.Server{Addr: ":53", Net: "udp"} - server.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="} - go server.ListenAndServe() - dns.HandleFunc(".", handleRequest) - - func handleRequest(w dns.ResponseWriter, r *dns.Msg) { - m := new(dns.Msg) - m.SetReply(r) - if r.IsTsig() != nil { - if w.TsigStatus() == nil { - // *Msg r has an TSIG record and it was validated - m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix()) - } else { - // *Msg r has an TSIG records and it was not valided - } - } - w.WriteMsg(m) - } - -PRIVATE RRS - -RFC 6895 sets aside a range of type codes for private use. This range -is 65,280 - 65,534 (0xFF00 - 0xFFFE). When experimenting with new Resource Records these -can be used, before requesting an official type code from IANA. - -see http://miek.nl/2014/September/21/idn-and-private-rr-in-go-dns/ for more -information. - -EDNS0 - -EDNS0 is an extension mechanism for the DNS defined in RFC 2671 and updated -by RFC 6891. It defines an new RR type, the OPT RR, which is then completely -abused. -Basic use pattern for creating an (empty) OPT RR: - - o := new(dns.OPT) - o.Hdr.Name = "." // MUST be the root zone, per definition. - o.Hdr.Rrtype = dns.TypeOPT - -The rdata of an OPT RR consists out of a slice of EDNS0 (RFC 6891) -interfaces. Currently only a few have been standardized: EDNS0_NSID -(RFC 5001) and EDNS0_SUBNET (draft-vandergaast-edns-client-subnet-02). Note -that these options may be combined in an OPT RR. -Basic use pattern for a server to check if (and which) options are set: - - // o is a dns.OPT - for _, s := range o.Option { - switch e := s.(type) { - case *dns.EDNS0_NSID: - // do stuff with e.Nsid - case *dns.EDNS0_SUBNET: - // access e.Family, e.Address, etc. - } - } - -SIG(0) - -From RFC 2931: - - SIG(0) provides protection for DNS transactions and requests .... - ... protection for glue records, DNS requests, protection for message headers - on requests and responses, and protection of the overall integrity of a response. - -It works like TSIG, except that SIG(0) uses public key cryptography, instead of the shared -secret approach in TSIG. -Supported algorithms: DSA, ECDSAP256SHA256, ECDSAP384SHA384, RSASHA1, RSASHA256 and -RSASHA512. - -Signing subsequent messages in multi-message sessions is not implemented. -*/ -package dns diff --git a/vendor/github.com/miekg/dns/edns.go b/vendor/github.com/miekg/dns/edns.go deleted file mode 100644 index fc0b4692..00000000 --- a/vendor/github.com/miekg/dns/edns.go +++ /dev/null @@ -1,597 +0,0 @@ -package dns - -import ( - "encoding/binary" - "encoding/hex" - "errors" - "fmt" - "net" - "strconv" -) - -// EDNS0 Option codes. 
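// As a hedged sketch (m is assumed to be a *Msg under construction), a client
// asks a server to identify itself by attaching an OPT RR carrying the NSID
// code from the table below:
//
//	o := new(OPT)
//	o.Hdr.Name = "."
//	o.Hdr.Rrtype = TypeOPT
//	o.Option = append(o.Option, &EDNS0_NSID{Code: EDNS0NSID, Nsid: ""})
//	m.Extra = append(m.Extra, o)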
-const ( - EDNS0LLQ = 0x1 // long lived queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01 - EDNS0UL = 0x2 // update lease draft: http://files.dns-sd.org/draft-sekar-dns-ul.txt - EDNS0NSID = 0x3 // nsid (RFC5001) - EDNS0DAU = 0x5 // DNSSEC Algorithm Understood - EDNS0DHU = 0x6 // DS Hash Understood - EDNS0N3U = 0x7 // NSEC3 Hash Understood - EDNS0SUBNET = 0x8 // client-subnet (RFC6891) - EDNS0EXPIRE = 0x9 // EDNS0 expire - EDNS0COOKIE = 0xa // EDNS0 Cookie - EDNS0TCPKEEPALIVE = 0xb // EDNS0 tcp keep alive (RFC7828) - EDNS0SUBNETDRAFT = 0x50fa // Don't use! Use EDNS0SUBNET - EDNS0LOCALSTART = 0xFDE9 // Beginning of range reserved for local/experimental use (RFC6891) - EDNS0LOCALEND = 0xFFFE // End of range reserved for local/experimental use (RFC6891) - _DO = 1 << 15 // dnssec ok -) - -// OPT is the EDNS0 RR appended to messages to convey extra (meta) information. -// See RFC 6891. -type OPT struct { - Hdr RR_Header - Option []EDNS0 `dns:"opt"` -} - -func (rr *OPT) String() string { - s := "\n;; OPT PSEUDOSECTION:\n; EDNS: version " + strconv.Itoa(int(rr.Version())) + "; " - if rr.Do() { - s += "flags: do; " - } else { - s += "flags: ; " - } - s += "udp: " + strconv.Itoa(int(rr.UDPSize())) - - for _, o := range rr.Option { - switch o.(type) { - case *EDNS0_NSID: - s += "\n; NSID: " + o.String() - h, e := o.pack() - var r string - if e == nil { - for _, c := range h { - r += "(" + string(c) + ")" - } - s += " " + r - } - case *EDNS0_SUBNET: - s += "\n; SUBNET: " + o.String() - if o.(*EDNS0_SUBNET).DraftOption { - s += " (draft)" - } - case *EDNS0_COOKIE: - s += "\n; COOKIE: " + o.String() - case *EDNS0_UL: - s += "\n; UPDATE LEASE: " + o.String() - case *EDNS0_LLQ: - s += "\n; LONG LIVED QUERIES: " + o.String() - case *EDNS0_DAU: - s += "\n; DNSSEC ALGORITHM UNDERSTOOD: " + o.String() - case *EDNS0_DHU: - s += "\n; DS HASH UNDERSTOOD: " + o.String() - case *EDNS0_N3U: - s += "\n; NSEC3 HASH UNDERSTOOD: " + o.String() - case *EDNS0_LOCAL: - s += "\n; LOCAL OPT: " + o.String() - } - } - return s -} - -func (rr *OPT) len() int { - l := rr.Hdr.len() - for i := 0; i < len(rr.Option); i++ { - l += 4 // Account for 2-byte option code and 2-byte option length. - lo, _ := rr.Option[i].pack() - l += len(lo) - } - return l -} - -// return the old value -> delete SetVersion? - -// Version returns the EDNS version used. Only zero is defined. -func (rr *OPT) Version() uint8 { - return uint8((rr.Hdr.Ttl & 0x00FF0000) >> 16) -} - -// SetVersion sets the version of EDNS. This is usually zero. -func (rr *OPT) SetVersion(v uint8) { - rr.Hdr.Ttl = rr.Hdr.Ttl&0xFF00FFFF | (uint32(v) << 16) -} - -// ExtendedRcode returns the EDNS extended RCODE field (the upper 8 bits of the TTL). -func (rr *OPT) ExtendedRcode() int { - return int((rr.Hdr.Ttl&0xFF000000)>>24) + 15 -} - -// SetExtendedRcode sets the EDNS extended RCODE field. -func (rr *OPT) SetExtendedRcode(v uint8) { - if v < RcodeBadVers { // Smaller than 16.. Use the 4 bits you have! - return - } - rr.Hdr.Ttl = rr.Hdr.Ttl&0x00FFFFFF | (uint32(v-15) << 24) -} - -// UDPSize returns the UDP buffer size. -func (rr *OPT) UDPSize() uint16 { - return rr.Hdr.Class -} - -// SetUDPSize sets the UDP buffer size. -func (rr *OPT) SetUDPSize(size uint16) { - rr.Hdr.Class = size -} - -// Do returns the value of the DO (DNSSEC OK) bit. -func (rr *OPT) Do() bool { - return rr.Hdr.Ttl&_DO == _DO -} - -// SetDo sets the DO (DNSSEC OK) bit. -// If we pass an argument, set the DO bit to that value. -// It is possible to pass 2 or more arguments. 
Any arguments after the 1st is silently ignored. -func (rr *OPT) SetDo(do ...bool) { - if len(do) == 1 { - if do[0] { - rr.Hdr.Ttl |= _DO - } else { - rr.Hdr.Ttl &^= _DO - } - } else { - rr.Hdr.Ttl |= _DO - } -} - -// EDNS0 defines an EDNS0 Option. An OPT RR can have multiple options appended to it. -type EDNS0 interface { - // Option returns the option code for the option. - Option() uint16 - // pack returns the bytes of the option data. - pack() ([]byte, error) - // unpack sets the data as found in the buffer. Is also sets - // the length of the slice as the length of the option data. - unpack([]byte) error - // String returns the string representation of the option. - String() string -} - -// The nsid EDNS0 option is used to retrieve a nameserver -// identifier. When sending a request Nsid must be set to the empty string -// The identifier is an opaque string encoded as hex. -// Basic use pattern for creating an nsid option: -// -// o := new(dns.OPT) -// o.Hdr.Name = "." -// o.Hdr.Rrtype = dns.TypeOPT -// e := new(dns.EDNS0_NSID) -// e.Code = dns.EDNS0NSID -// e.Nsid = "AA" -// o.Option = append(o.Option, e) -type EDNS0_NSID struct { - Code uint16 // Always EDNS0NSID - Nsid string // This string needs to be hex encoded -} - -func (e *EDNS0_NSID) pack() ([]byte, error) { - h, err := hex.DecodeString(e.Nsid) - if err != nil { - return nil, err - } - return h, nil -} - -func (e *EDNS0_NSID) Option() uint16 { return EDNS0NSID } -func (e *EDNS0_NSID) unpack(b []byte) error { e.Nsid = hex.EncodeToString(b); return nil } -func (e *EDNS0_NSID) String() string { return string(e.Nsid) } - -// EDNS0_SUBNET is the subnet option that is used to give the remote nameserver -// an idea of where the client lives. It can then give back a different -// answer depending on the location or network topology. -// Basic use pattern for creating an subnet option: -// -// o := new(dns.OPT) -// o.Hdr.Name = "." -// o.Hdr.Rrtype = dns.TypeOPT -// e := new(dns.EDNS0_SUBNET) -// e.Code = dns.EDNS0SUBNET -// e.Family = 1 // 1 for IPv4 source address, 2 for IPv6 -// e.NetMask = 32 // 32 for IPV4, 128 for IPv6 -// e.SourceScope = 0 -// e.Address = net.ParseIP("127.0.0.1").To4() // for IPv4 -// // e.Address = net.ParseIP("2001:7b8:32a::2") // for IPV6 -// o.Option = append(o.Option, e) -// -// Note: the spec (draft-ietf-dnsop-edns-client-subnet-00) has some insane logic -// for which netmask applies to the address. This code will parse all the -// available bits when unpacking (up to optlen). When packing it will apply -// SourceNetmask. If you need more advanced logic, patches welcome and good luck. -type EDNS0_SUBNET struct { - Code uint16 // Always EDNS0SUBNET - Family uint16 // 1 for IP, 2 for IP6 - SourceNetmask uint8 - SourceScope uint8 - Address net.IP - DraftOption bool // Set to true if using the old (0x50fa) option code -} - -func (e *EDNS0_SUBNET) Option() uint16 { - if e.DraftOption { - return EDNS0SUBNETDRAFT - } - return EDNS0SUBNET -} - -func (e *EDNS0_SUBNET) pack() ([]byte, error) { - b := make([]byte, 4) - binary.BigEndian.PutUint16(b[0:], e.Family) - b[2] = e.SourceNetmask - b[3] = e.SourceScope - switch e.Family { - case 1: - if e.SourceNetmask > net.IPv4len*8 { - return nil, errors.New("dns: bad netmask") - } - if len(e.Address.To4()) != net.IPv4len { - return nil, errors.New("dns: bad address") - } - ip := e.Address.To4().Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv4len*8)) - needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up - b = append(b, ip[:needLength]...) 
- case 2: - if e.SourceNetmask > net.IPv6len*8 { - return nil, errors.New("dns: bad netmask") - } - if len(e.Address) != net.IPv6len { - return nil, errors.New("dns: bad address") - } - ip := e.Address.Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv6len*8)) - needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up - b = append(b, ip[:needLength]...) - default: - return nil, errors.New("dns: bad address family") - } - return b, nil -} - -func (e *EDNS0_SUBNET) unpack(b []byte) error { - if len(b) < 4 { - return ErrBuf - } - e.Family = binary.BigEndian.Uint16(b) - e.SourceNetmask = b[2] - e.SourceScope = b[3] - switch e.Family { - case 1: - if e.SourceNetmask > net.IPv4len*8 || e.SourceScope > net.IPv4len*8 { - return errors.New("dns: bad netmask") - } - addr := make([]byte, net.IPv4len) - for i := 0; i < net.IPv4len && 4+i < len(b); i++ { - addr[i] = b[4+i] - } - e.Address = net.IPv4(addr[0], addr[1], addr[2], addr[3]) - case 2: - if e.SourceNetmask > net.IPv6len*8 || e.SourceScope > net.IPv6len*8 { - return errors.New("dns: bad netmask") - } - addr := make([]byte, net.IPv6len) - for i := 0; i < net.IPv6len && 4+i < len(b); i++ { - addr[i] = b[4+i] - } - e.Address = net.IP{addr[0], addr[1], addr[2], addr[3], addr[4], - addr[5], addr[6], addr[7], addr[8], addr[9], addr[10], - addr[11], addr[12], addr[13], addr[14], addr[15]} - default: - return errors.New("dns: bad address family") - } - return nil -} - -func (e *EDNS0_SUBNET) String() (s string) { - if e.Address == nil { - s = "" - } else if e.Address.To4() != nil { - s = e.Address.String() - } else { - s = "[" + e.Address.String() + "]" - } - s += "/" + strconv.Itoa(int(e.SourceNetmask)) + "/" + strconv.Itoa(int(e.SourceScope)) - return -} - -// The Cookie EDNS0 option -// -// o := new(dns.OPT) -// o.Hdr.Name = "." -// o.Hdr.Rrtype = dns.TypeOPT -// e := new(dns.EDNS0_COOKIE) -// e.Code = dns.EDNS0COOKIE -// e.Cookie = "24a5ac.." -// o.Option = append(o.Option, e) -// -// The Cookie field consists out of a client cookie (RFC 7873 Section 4), that is -// always 8 bytes. It may then optionally be followed by the server cookie. The server -// cookie is of variable length, 8 to a maximum of 32 bytes. In other words: -// -// cCookie := o.Cookie[:16] -// sCookie := o.Cookie[16:] -// -// There is no guarantee that the Cookie string has a specific length. -type EDNS0_COOKIE struct { - Code uint16 // Always EDNS0COOKIE - Cookie string // Hex-encoded cookie data -} - -func (e *EDNS0_COOKIE) pack() ([]byte, error) { - h, err := hex.DecodeString(e.Cookie) - if err != nil { - return nil, err - } - return h, nil -} - -func (e *EDNS0_COOKIE) Option() uint16 { return EDNS0COOKIE } -func (e *EDNS0_COOKIE) unpack(b []byte) error { e.Cookie = hex.EncodeToString(b); return nil } -func (e *EDNS0_COOKIE) String() string { return e.Cookie } - -// The EDNS0_UL (Update Lease) (draft RFC) option is used to tell the server to set -// an expiration on an update RR. This is helpful for clients that cannot clean -// up after themselves. This is a draft RFC and more information can be found at -// http://files.dns-sd.org/draft-sekar-dns-ul.txt -// -// o := new(dns.OPT) -// o.Hdr.Name = "." 
-// o.Hdr.Rrtype = dns.TypeOPT -// e := new(dns.EDNS0_UL) -// e.Code = dns.EDNS0UL -// e.Lease = 120 // in seconds -// o.Option = append(o.Option, e) -type EDNS0_UL struct { - Code uint16 // Always EDNS0UL - Lease uint32 -} - -func (e *EDNS0_UL) Option() uint16 { return EDNS0UL } -func (e *EDNS0_UL) String() string { return strconv.FormatUint(uint64(e.Lease), 10) } - -// Copied: http://golang.org/src/pkg/net/dnsmsg.go -func (e *EDNS0_UL) pack() ([]byte, error) { - b := make([]byte, 4) - binary.BigEndian.PutUint32(b, e.Lease) - return b, nil -} - -func (e *EDNS0_UL) unpack(b []byte) error { - if len(b) < 4 { - return ErrBuf - } - e.Lease = binary.BigEndian.Uint32(b) - return nil -} - -// EDNS0_LLQ stands for Long Lived Queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01 -// Implemented for completeness, as the EDNS0 type code is assigned. -type EDNS0_LLQ struct { - Code uint16 // Always EDNS0LLQ - Version uint16 - Opcode uint16 - Error uint16 - Id uint64 - LeaseLife uint32 -} - -func (e *EDNS0_LLQ) Option() uint16 { return EDNS0LLQ } - -func (e *EDNS0_LLQ) pack() ([]byte, error) { - b := make([]byte, 18) - binary.BigEndian.PutUint16(b[0:], e.Version) - binary.BigEndian.PutUint16(b[2:], e.Opcode) - binary.BigEndian.PutUint16(b[4:], e.Error) - binary.BigEndian.PutUint64(b[6:], e.Id) - binary.BigEndian.PutUint32(b[14:], e.LeaseLife) - return b, nil -} - -func (e *EDNS0_LLQ) unpack(b []byte) error { - if len(b) < 18 { - return ErrBuf - } - e.Version = binary.BigEndian.Uint16(b[0:]) - e.Opcode = binary.BigEndian.Uint16(b[2:]) - e.Error = binary.BigEndian.Uint16(b[4:]) - e.Id = binary.BigEndian.Uint64(b[6:]) - e.LeaseLife = binary.BigEndian.Uint32(b[14:]) - return nil -} - -func (e *EDNS0_LLQ) String() string { - s := strconv.FormatUint(uint64(e.Version), 10) + " " + strconv.FormatUint(uint64(e.Opcode), 10) + - " " + strconv.FormatUint(uint64(e.Error), 10) + " " + strconv.FormatUint(uint64(e.Id), 10) + - " " + strconv.FormatUint(uint64(e.LeaseLife), 10) - return s -} - -type EDNS0_DAU struct { - Code uint16 // Always EDNS0DAU - AlgCode []uint8 -} - -func (e *EDNS0_DAU) Option() uint16 { return EDNS0DAU } -func (e *EDNS0_DAU) pack() ([]byte, error) { return e.AlgCode, nil } -func (e *EDNS0_DAU) unpack(b []byte) error { e.AlgCode = b; return nil } - -func (e *EDNS0_DAU) String() string { - s := "" - for i := 0; i < len(e.AlgCode); i++ { - if a, ok := AlgorithmToString[e.AlgCode[i]]; ok { - s += " " + a - } else { - s += " " + strconv.Itoa(int(e.AlgCode[i])) - } - } - return s -} - -type EDNS0_DHU struct { - Code uint16 // Always EDNS0DHU - AlgCode []uint8 -} - -func (e *EDNS0_DHU) Option() uint16 { return EDNS0DHU } -func (e *EDNS0_DHU) pack() ([]byte, error) { return e.AlgCode, nil } -func (e *EDNS0_DHU) unpack(b []byte) error { e.AlgCode = b; return nil } - -func (e *EDNS0_DHU) String() string { - s := "" - for i := 0; i < len(e.AlgCode); i++ { - if a, ok := HashToString[e.AlgCode[i]]; ok { - s += " " + a - } else { - s += " " + strconv.Itoa(int(e.AlgCode[i])) - } - } - return s -} - -type EDNS0_N3U struct { - Code uint16 // Always EDNS0N3U - AlgCode []uint8 -} - -func (e *EDNS0_N3U) Option() uint16 { return EDNS0N3U } -func (e *EDNS0_N3U) pack() ([]byte, error) { return e.AlgCode, nil } -func (e *EDNS0_N3U) unpack(b []byte) error { e.AlgCode = b; return nil } - -func (e *EDNS0_N3U) String() string { - // Re-use the hash map - s := "" - for i := 0; i < len(e.AlgCode); i++ { - if a, ok := HashToString[e.AlgCode[i]]; ok { - s += " " + a - } else { - s += " " + strconv.Itoa(int(e.AlgCode[i])) 
- } - } - return s -} - -type EDNS0_EXPIRE struct { - Code uint16 // Always EDNS0EXPIRE - Expire uint32 -} - -func (e *EDNS0_EXPIRE) Option() uint16 { return EDNS0EXPIRE } -func (e *EDNS0_EXPIRE) String() string { return strconv.FormatUint(uint64(e.Expire), 10) } - -func (e *EDNS0_EXPIRE) pack() ([]byte, error) { - b := make([]byte, 4) - b[0] = byte(e.Expire >> 24) - b[1] = byte(e.Expire >> 16) - b[2] = byte(e.Expire >> 8) - b[3] = byte(e.Expire) - return b, nil -} - -func (e *EDNS0_EXPIRE) unpack(b []byte) error { - if len(b) < 4 { - return ErrBuf - } - e.Expire = binary.BigEndian.Uint32(b) - return nil -} - -// The EDNS0_LOCAL option is used for local/experimental purposes. The option -// code is recommended to be within the range [EDNS0LOCALSTART, EDNS0LOCALEND] -// (RFC6891), although any unassigned code can actually be used. The content of -// the option is made available in Data, unaltered. -// Basic use pattern for creating a local option: -// -// o := new(dns.OPT) -// o.Hdr.Name = "." -// o.Hdr.Rrtype = dns.TypeOPT -// e := new(dns.EDNS0_LOCAL) -// e.Code = dns.EDNS0LOCALSTART -// e.Data = []byte{72, 82, 74} -// o.Option = append(o.Option, e) -type EDNS0_LOCAL struct { - Code uint16 - Data []byte -} - -func (e *EDNS0_LOCAL) Option() uint16 { return e.Code } -func (e *EDNS0_LOCAL) String() string { - return strconv.FormatInt(int64(e.Code), 10) + ":0x" + hex.EncodeToString(e.Data) -} - -func (e *EDNS0_LOCAL) pack() ([]byte, error) { - b := make([]byte, len(e.Data)) - copied := copy(b, e.Data) - if copied != len(e.Data) { - return nil, ErrBuf - } - return b, nil -} - -func (e *EDNS0_LOCAL) unpack(b []byte) error { - e.Data = make([]byte, len(b)) - copied := copy(e.Data, b) - if copied != len(b) { - return ErrBuf - } - return nil -} - -type EDNS0_TCP_KEEPALIVE struct { - Code uint16 // Always EDNSTCPKEEPALIVE - Length uint16 // the value 0 if the TIMEOUT is omitted, the value 2 if it is present; - Timeout uint16 // an idle timeout value for the TCP connection, specified in units of 100 milliseconds, encoded in network byte order. 
-} - -func (e *EDNS0_TCP_KEEPALIVE) Option() uint16 { - return EDNS0TCPKEEPALIVE -} - -func (e *EDNS0_TCP_KEEPALIVE) pack() ([]byte, error) { - if e.Timeout != 0 && e.Length != 2 { - return nil, errors.New("dns: timeout specified but length is not 2") - } - if e.Timeout == 0 && e.Length != 0 { - return nil, errors.New("dns: timeout not specified but length is not 0") - } - b := make([]byte, 4+e.Length) - binary.BigEndian.PutUint16(b[0:], e.Code) - binary.BigEndian.PutUint16(b[2:], e.Length) - if e.Length == 2 { - binary.BigEndian.PutUint16(b[4:], e.Timeout) - } - return b, nil -} - -func (e *EDNS0_TCP_KEEPALIVE) unpack(b []byte) error { - if len(b) < 4 { - return ErrBuf - } - e.Length = binary.BigEndian.Uint16(b[2:4]) - if e.Length != 0 && e.Length != 2 { - return errors.New("dns: length mismatch, want 0/2 but got " + strconv.FormatUint(uint64(e.Length), 10)) - } - if e.Length == 2 { - if len(b) < 6 { - return ErrBuf - } - e.Timeout = binary.BigEndian.Uint16(b[4:6]) - } - return nil -} - -func (e *EDNS0_TCP_KEEPALIVE) String() (s string) { - s = "use tcp keep-alive" - if e.Length == 0 { - s += ", timeout omitted" - } else { - s += fmt.Sprintf(", timeout %dms", e.Timeout*100) - } - return -} diff --git a/vendor/github.com/miekg/dns/format.go b/vendor/github.com/miekg/dns/format.go deleted file mode 100644 index 3f5303c2..00000000 --- a/vendor/github.com/miekg/dns/format.go +++ /dev/null @@ -1,87 +0,0 @@ -package dns - -import ( - "net" - "reflect" - "strconv" -) - -// NumField returns the number of rdata fields r has. -func NumField(r RR) int { - return reflect.ValueOf(r).Elem().NumField() - 1 // Remove RR_Header -} - -// Field returns the rdata field i as a string. Fields are indexed starting from 1. -// RR types that holds slice data, for instance the NSEC type bitmap will return a single -// string where the types are concatenated using a space. -// Accessing non existing fields will cause a panic. 
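// A hypothetical call sequence (the MX record is an illustrative value):
//
//	mx, _ := NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
//	NumField(mx)  // 2: Preference and Mx
//	Field(mx, 1)  // "10"
//	Field(mx, 2)  // "mx.miek.nl."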
-func Field(r RR, i int) string { - if i == 0 { - return "" - } - d := reflect.ValueOf(r).Elem().Field(i) - switch k := d.Kind(); k { - case reflect.String: - return d.String() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return strconv.FormatInt(d.Int(), 10) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return strconv.FormatUint(d.Uint(), 10) - case reflect.Slice: - switch reflect.ValueOf(r).Elem().Type().Field(i).Tag { - case `dns:"a"`: - // TODO(miek): Hmm store this as 16 bytes - if d.Len() < net.IPv6len { - return net.IPv4(byte(d.Index(0).Uint()), - byte(d.Index(1).Uint()), - byte(d.Index(2).Uint()), - byte(d.Index(3).Uint())).String() - } - return net.IPv4(byte(d.Index(12).Uint()), - byte(d.Index(13).Uint()), - byte(d.Index(14).Uint()), - byte(d.Index(15).Uint())).String() - case `dns:"aaaa"`: - return net.IP{ - byte(d.Index(0).Uint()), - byte(d.Index(1).Uint()), - byte(d.Index(2).Uint()), - byte(d.Index(3).Uint()), - byte(d.Index(4).Uint()), - byte(d.Index(5).Uint()), - byte(d.Index(6).Uint()), - byte(d.Index(7).Uint()), - byte(d.Index(8).Uint()), - byte(d.Index(9).Uint()), - byte(d.Index(10).Uint()), - byte(d.Index(11).Uint()), - byte(d.Index(12).Uint()), - byte(d.Index(13).Uint()), - byte(d.Index(14).Uint()), - byte(d.Index(15).Uint()), - }.String() - case `dns:"nsec"`: - if d.Len() == 0 { - return "" - } - s := Type(d.Index(0).Uint()).String() - for i := 1; i < d.Len(); i++ { - s += " " + Type(d.Index(i).Uint()).String() - } - return s - default: - // if it does not have a tag its a string slice - fallthrough - case `dns:"txt"`: - if d.Len() == 0 { - return "" - } - s := d.Index(0).String() - for i := 1; i < d.Len(); i++ { - s += " " + d.Index(i).String() - } - return s - } - } - return "" -} diff --git a/vendor/github.com/miekg/dns/generate.go b/vendor/github.com/miekg/dns/generate.go deleted file mode 100644 index e4481a4b..00000000 --- a/vendor/github.com/miekg/dns/generate.go +++ /dev/null @@ -1,159 +0,0 @@ -package dns - -import ( - "bytes" - "errors" - "fmt" - "strconv" - "strings" -) - -// Parse the $GENERATE statement as used in BIND9 zones. -// See http://www.zytrax.com/books/dns/ch8/generate.html for instance. -// We are called after '$GENERATE '. After which we expect: -// * the range (12-24/2) -// * lhs (ownername) -// * [[ttl][class]] -// * type -// * rhs (rdata) -// But we are lazy here, only the range is parsed *all* occurrences -// of $ after that are interpreted. -// Any error are returned as a string value, the empty string signals -// "no error". -func generate(l lex, c chan lex, t chan *Token, o string) string { - step := 1 - if i := strings.IndexAny(l.token, "/"); i != -1 { - if i+1 == len(l.token) { - return "bad step in $GENERATE range" - } - if s, err := strconv.Atoi(l.token[i+1:]); err == nil { - if s < 0 { - return "bad step in $GENERATE range" - } - step = s - } else { - return "bad step in $GENERATE range" - } - l.token = l.token[:i] - } - sx := strings.SplitN(l.token, "-", 2) - if len(sx) != 2 { - return "bad start-stop in $GENERATE range" - } - start, err := strconv.Atoi(sx[0]) - if err != nil { - return "bad start in $GENERATE range" - } - end, err := strconv.Atoi(sx[1]) - if err != nil { - return "bad stop in $GENERATE range" - } - if end < 0 || start < 0 || end < start { - return "bad range in $GENERATE range" - } - - <-c // _BLANK - // Create a complete new string, which we then parse again. 
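	// As an illustration (hypothetical zone data): after "$GENERATE " the
	// range, e.g. 1-3, has been consumed above; the remainder of the line,
	// e.g.
	//
	//	host$ A 192.0.2.$
	//
	// is gathered below and expanded once per step, substituting each $.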
- s := "" -BuildRR: - l = <-c - if l.value != zNewline && l.value != zEOF { - s += l.token - goto BuildRR - } - for i := start; i <= end; i += step { - var ( - escape bool - dom bytes.Buffer - mod string - err error - offset int - ) - - for j := 0; j < len(s); j++ { // No 'range' because we need to jump around - switch s[j] { - case '\\': - if escape { - dom.WriteByte('\\') - escape = false - continue - } - escape = true - case '$': - mod = "%d" - offset = 0 - if escape { - dom.WriteByte('$') - escape = false - continue - } - escape = false - if j+1 >= len(s) { // End of the string - dom.WriteString(fmt.Sprintf(mod, i+offset)) - continue - } else { - if s[j+1] == '$' { - dom.WriteByte('$') - j++ - continue - } - } - // Search for { and } - if s[j+1] == '{' { // Modifier block - sep := strings.Index(s[j+2:], "}") - if sep == -1 { - return "bad modifier in $GENERATE" - } - mod, offset, err = modToPrintf(s[j+2 : j+2+sep]) - if err != nil { - return err.Error() - } - j += 2 + sep // Jump to it - } - dom.WriteString(fmt.Sprintf(mod, i+offset)) - default: - if escape { // Pretty useless here - escape = false - continue - } - dom.WriteByte(s[j]) - } - } - // Re-parse the RR and send it on the current channel t - rx, err := NewRR("$ORIGIN " + o + "\n" + dom.String()) - if err != nil { - return err.Error() - } - t <- &Token{RR: rx} - // Its more efficient to first built the rrlist and then parse it in - // one go! But is this a problem? - } - return "" -} - -// Convert a $GENERATE modifier 0,0,d to something Printf can deal with. -func modToPrintf(s string) (string, int, error) { - xs := strings.SplitN(s, ",", 3) - if len(xs) != 3 { - return "", 0, errors.New("bad modifier in $GENERATE") - } - // xs[0] is offset, xs[1] is width, xs[2] is base - if xs[2] != "o" && xs[2] != "d" && xs[2] != "x" && xs[2] != "X" { - return "", 0, errors.New("bad base in $GENERATE") - } - offset, err := strconv.Atoi(xs[0]) - if err != nil || offset > 255 { - return "", 0, errors.New("bad offset in $GENERATE") - } - width, err := strconv.Atoi(xs[1]) - if err != nil || width > 255 { - return "", offset, errors.New("bad width in $GENERATE") - } - switch { - case width < 0: - return "", offset, errors.New("bad width in $GENERATE") - case width == 0: - return "%" + xs[1] + xs[2], offset, nil - } - return "%0" + xs[1] + xs[2], offset, nil -} diff --git a/vendor/github.com/miekg/dns/labels.go b/vendor/github.com/miekg/dns/labels.go deleted file mode 100644 index fca5c7dd..00000000 --- a/vendor/github.com/miekg/dns/labels.go +++ /dev/null @@ -1,168 +0,0 @@ -package dns - -// Holds a bunch of helper functions for dealing with labels. - -// SplitDomainName splits a name string into it's labels. -// www.miek.nl. returns []string{"www", "miek", "nl"} -// .www.miek.nl. returns []string{"", "www", "miek", "nl"}, -// The root label (.) returns nil. Note that using -// strings.Split(s) will work in most cases, but does not handle -// escaped dots (\.) for instance. -// s must be a syntactically valid domain name, see IsDomainName. -func SplitDomainName(s string) (labels []string) { - if len(s) == 0 { - return nil - } - fqdnEnd := 0 // offset of the final '.' or the length of the name - idx := Split(s) - begin := 0 - if s[len(s)-1] == '.' 
{ - fqdnEnd = len(s) - 1 - } else { - fqdnEnd = len(s) - } - - switch len(idx) { - case 0: - return nil - case 1: - // no-op - default: - end := 0 - for i := 1; i < len(idx); i++ { - end = idx[i] - labels = append(labels, s[begin:end-1]) - begin = end - } - } - - labels = append(labels, s[begin:fqdnEnd]) - return labels -} - -// CompareDomainName compares the names s1 and s2 and -// returns how many labels they have in common starting from the *right*. -// The comparison stops at the first inequality. The names are not downcased -// before the comparison. -// -// www.miek.nl. and miek.nl. have two labels in common: miek and nl -// www.miek.nl. and www.bla.nl. have one label in common: nl -// -// s1 and s2 must be syntactically valid domain names. -func CompareDomainName(s1, s2 string) (n int) { - s1 = Fqdn(s1) - s2 = Fqdn(s2) - l1 := Split(s1) - l2 := Split(s2) - - // the first check: root label - if l1 == nil || l2 == nil { - return - } - - j1 := len(l1) - 1 // end - i1 := len(l1) - 2 // start - j2 := len(l2) - 1 - i2 := len(l2) - 2 - // the second check can be done here: last/only label - // before we fall through into the for-loop below - if s1[l1[j1]:] == s2[l2[j2]:] { - n++ - } else { - return - } - for { - if i1 < 0 || i2 < 0 { - break - } - if s1[l1[i1]:l1[j1]] == s2[l2[i2]:l2[j2]] { - n++ - } else { - break - } - j1-- - i1-- - j2-- - i2-- - } - return -} - -// CountLabel counts the the number of labels in the string s. -// s must be a syntactically valid domain name. -func CountLabel(s string) (labels int) { - if s == "." { - return - } - off := 0 - end := false - for { - off, end = NextLabel(s, off) - labels++ - if end { - return - } - } -} - -// Split splits a name s into its label indexes. -// www.miek.nl. returns []int{0, 4, 9}, www.miek.nl also returns []int{0, 4, 9}. -// The root name (.) returns nil. Also see SplitDomainName. -// s must be a syntactically valid domain name. -func Split(s string) []int { - if s == "." { - return nil - } - idx := make([]int, 1, 3) - off := 0 - end := false - - for { - off, end = NextLabel(s, off) - if end { - return idx - } - idx = append(idx, off) - } -} - -// NextLabel returns the index of the start of the next label in the -// string s starting at offset. -// The bool end is true when the end of the string has been reached. -// Also see PrevLabel. -func NextLabel(s string, offset int) (i int, end bool) { - quote := false - for i = offset; i < len(s)-1; i++ { - switch s[i] { - case '\\': - quote = !quote - default: - quote = false - case '.': - if quote { - quote = !quote - continue - } - return i + 1, false - } - } - return i + 1, true -} - -// PrevLabel returns the index of the label when starting from the right and -// jumping n labels to the left. -// The bool start is true when the start of the string has been overshot. -// Also see NextLabel. -func PrevLabel(s string, n int) (i int, start bool) { - if n == 0 { - return len(s), false - } - lab := Split(s) - if lab == nil { - return 0, true - } - if n > len(lab) { - return 0, true - } - return lab[len(lab)-n], false -} diff --git a/vendor/github.com/miekg/dns/msg.go b/vendor/github.com/miekg/dns/msg.go deleted file mode 100644 index a9acd1e9..00000000 --- a/vendor/github.com/miekg/dns/msg.go +++ /dev/null @@ -1,1231 +0,0 @@ -// DNS packet assembly, see RFC 1035. Converting from - Unpack() - -// and to - Pack() - wire format. -// All the packers and unpackers take a (msg []byte, off int) -// and return (off1 int, ok bool). 
If they return ok==false, they
-// also return off1==len(msg), so that the next unpacker will
-// also fail. This lets us avoid checks of ok until the end of a
-// packing sequence.
-
-package dns
-
-//go:generate go run msg_generate.go
-
-import (
-	crand "crypto/rand"
-	"encoding/binary"
-	"math/big"
-	"math/rand"
-	"strconv"
-)
-
-func init() {
-	// Initialize the default math/rand source using crypto/rand to provide better
-	// security without the performance trade-off.
-	buf := make([]byte, 8)
-	_, err := crand.Read(buf)
-	if err != nil {
-		// Failed to read from the cryptographic source; fall back to the default
-		// initial seed (1) by returning early.
-		return
-	}
-	seed := binary.BigEndian.Uint64(buf)
-	rand.Seed(int64(seed))
-}
-
-const maxCompressionOffset = 2 << 13 // We have 14 bits for the compression pointer
-
-var (
-	ErrAlg           error = &Error{err: "bad algorithm"}                  // ErrAlg indicates an error with the (DNSSEC) algorithm.
-	ErrAuth          error = &Error{err: "bad authentication"}             // ErrAuth indicates an error in the TSIG authentication.
-	ErrBuf           error = &Error{err: "buffer size too small"}          // ErrBuf indicates that the buffer used is too small for the message.
-	ErrConnEmpty     error = &Error{err: "conn has no connection"}         // ErrConnEmpty indicates a connection is being used before it is initialized.
-	ErrExtendedRcode error = &Error{err: "bad extended rcode"}             // ErrExtendedRcode indicates a bad extended rcode.
-	ErrFqdn          error = &Error{err: "domain must be fully qualified"} // ErrFqdn indicates that a domain name does not have a closing dot.
-	ErrId            error = &Error{err: "id mismatch"}                    // ErrId indicates there is a mismatch with the message's ID.
-	ErrKeyAlg        error = &Error{err: "bad key algorithm"}              // ErrKeyAlg indicates that the algorithm in the key is not valid.
-	ErrKey           error = &Error{err: "bad key"}
-	ErrKeySize       error = &Error{err: "bad key size"}
-	ErrNoSig         error = &Error{err: "no signature found"}
-	ErrPrivKey       error = &Error{err: "bad private key"}
-	ErrRcode         error = &Error{err: "bad rcode"}
-	ErrRdata         error = &Error{err: "bad rdata"}
-	ErrRRset         error = &Error{err: "bad rrset"}
-	ErrSecret        error = &Error{err: "no secrets defined"}
-	ErrShortRead     error = &Error{err: "short read"}
-	ErrSig           error = &Error{err: "bad signature"}                      // ErrSig indicates that a signature can not be cryptographically validated.
-	ErrSoa           error = &Error{err: "no SOA"}                             // ErrSoa indicates that no SOA RR was seen when doing zone transfers.
-	ErrTime          error = &Error{err: "bad time"}                           // ErrTime indicates a timing error in TSIG authentication.
-	ErrTruncated     error = &Error{err: "failed to unpack truncated message"} // ErrTruncated indicates that we failed to unpack a truncated message. We unpacked as much as we had so Msg can still be used, if desired.
-)
-
-// Id by default returns a 16-bit random number to be used as a
-// message id. The randomness provided should be good enough. As this is a
-// variable, the function can be reassigned to a custom function.
-// For instance, to make it return a static value:
-//
-//	dns.Id = func() uint16 { return 3 }
-var Id func() uint16 = id
-
-// id returns a 16-bit random number to be used as a
-// message id. The randomness provided should be good enough.
-func id() uint16 {
-	id32 := rand.Uint32()
-	return uint16(id32)
-}
-
-// MsgHdr is a manually-unpacked version of (id, bits).
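-// The boolean fields mirror the header flag bits from RFC 1035 (QR, AA, TC,
-// RD, RA, Z) plus the DNSSEC AD and CD bits; Msg.Pack folds them back into
-// the 16-bit flags word and Msg.Unpack splits them out again.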
-type MsgHdr struct { - Id uint16 - Response bool - Opcode int - Authoritative bool - Truncated bool - RecursionDesired bool - RecursionAvailable bool - Zero bool - AuthenticatedData bool - CheckingDisabled bool - Rcode int -} - -// Msg contains the layout of a DNS message. -type Msg struct { - MsgHdr - Compress bool `json:"-"` // If true, the message will be compressed when converted to wire format. - Question []Question // Holds the RR(s) of the question section. - Answer []RR // Holds the RR(s) of the answer section. - Ns []RR // Holds the RR(s) of the authority section. - Extra []RR // Holds the RR(s) of the additional section. -} - -// ClassToString is a maps Classes to strings for each CLASS wire type. -var ClassToString = map[uint16]string{ - ClassINET: "IN", - ClassCSNET: "CS", - ClassCHAOS: "CH", - ClassHESIOD: "HS", - ClassNONE: "NONE", - ClassANY: "ANY", -} - -// OpcodeToString maps Opcodes to strings. -var OpcodeToString = map[int]string{ - OpcodeQuery: "QUERY", - OpcodeIQuery: "IQUERY", - OpcodeStatus: "STATUS", - OpcodeNotify: "NOTIFY", - OpcodeUpdate: "UPDATE", -} - -// RcodeToString maps Rcodes to strings. -var RcodeToString = map[int]string{ - RcodeSuccess: "NOERROR", - RcodeFormatError: "FORMERR", - RcodeServerFailure: "SERVFAIL", - RcodeNameError: "NXDOMAIN", - RcodeNotImplemented: "NOTIMPL", - RcodeRefused: "REFUSED", - RcodeYXDomain: "YXDOMAIN", // See RFC 2136 - RcodeYXRrset: "YXRRSET", - RcodeNXRrset: "NXRRSET", - RcodeNotAuth: "NOTAUTH", - RcodeNotZone: "NOTZONE", - RcodeBadSig: "BADSIG", // Also known as RcodeBadVers, see RFC 6891 - // RcodeBadVers: "BADVERS", - RcodeBadKey: "BADKEY", - RcodeBadTime: "BADTIME", - RcodeBadMode: "BADMODE", - RcodeBadName: "BADNAME", - RcodeBadAlg: "BADALG", - RcodeBadTrunc: "BADTRUNC", - RcodeBadCookie: "BADCOOKIE", -} - -// Domain names are a sequence of counted strings -// split at the dots. They end with a zero-length string. - -// PackDomainName packs a domain name s into msg[off:]. -// If compression is wanted compress must be true and the compression -// map needs to hold a mapping between domain names and offsets -// pointing into msg. -func PackDomainName(s string, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) { - off1, _, err = packDomainName(s, msg, off, compression, compress) - return -} - -func packDomainName(s string, msg []byte, off int, compression map[string]int, compress bool) (off1 int, labels int, err error) { - // special case if msg == nil - lenmsg := 256 - if msg != nil { - lenmsg = len(msg) - } - ls := len(s) - if ls == 0 { // Ok, for instance when dealing with update RR without any rdata. - return off, 0, nil - } - // If not fully qualified, error out, but only if msg == nil #ugly - switch { - case msg == nil: - if s[ls-1] != '.' { - s += "." - ls++ - } - case msg != nil: - if s[ls-1] != '.' { - return lenmsg, 0, ErrFqdn - } - } - // Each dot ends a segment of the name. - // We trade each dot byte for a length byte. - // Except for escaped dots (\.), which are normal dots. - // There is also a trailing zero. - - // Compression - nameoffset := -1 - pointer := -1 - // Emit sequence of counted strings, chopping at dots. 
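-	// For example, "www.miek.nl." is emitted as the counted-string sequence
-	// 3 'w' 'w' 'w' 4 'm' 'i' 'e' 'k' 2 'n' 'l' 0.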
- begin := 0 - bs := []byte(s) - roBs, bsFresh, escapedDot := s, true, false - for i := 0; i < ls; i++ { - if bs[i] == '\\' { - for j := i; j < ls-1; j++ { - bs[j] = bs[j+1] - } - ls-- - if off+1 > lenmsg { - return lenmsg, labels, ErrBuf - } - // check for \DDD - if i+2 < ls && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) { - bs[i] = dddToByte(bs[i:]) - for j := i + 1; j < ls-2; j++ { - bs[j] = bs[j+2] - } - ls -= 2 - } else if bs[i] == 't' { - bs[i] = '\t' - } else if bs[i] == 'r' { - bs[i] = '\r' - } else if bs[i] == 'n' { - bs[i] = '\n' - } - escapedDot = bs[i] == '.' - bsFresh = false - continue - } - - if bs[i] == '.' { - if i > 0 && bs[i-1] == '.' && !escapedDot { - // two dots back to back is not legal - return lenmsg, labels, ErrRdata - } - if i-begin >= 1<<6 { // top two bits of length must be clear - return lenmsg, labels, ErrRdata - } - // off can already (we're in a loop) be bigger than len(msg) - // this happens when a name isn't fully qualified - if off+1 > lenmsg { - return lenmsg, labels, ErrBuf - } - if msg != nil { - msg[off] = byte(i - begin) - } - offset := off - off++ - for j := begin; j < i; j++ { - if off+1 > lenmsg { - return lenmsg, labels, ErrBuf - } - if msg != nil { - msg[off] = bs[j] - } - off++ - } - if compress && !bsFresh { - roBs = string(bs) - bsFresh = true - } - // Don't try to compress '.' - if compress && roBs[begin:] != "." { - if p, ok := compression[roBs[begin:]]; !ok { - // Only offsets smaller than this can be used. - if offset < maxCompressionOffset { - compression[roBs[begin:]] = offset - } - } else { - // The first hit is the longest matching dname - // keep the pointer offset we get back and store - // the offset of the current name, because that's - // where we need to insert the pointer later - - // If compress is true, we're allowed to compress this dname - if pointer == -1 && compress { - pointer = p // Where to point to - nameoffset = offset // Where to point from - break - } - } - } - labels++ - begin = i + 1 - } - escapedDot = false - } - // Root label is special - if len(bs) == 1 && bs[0] == '.' { - return off, labels, nil - } - // If we did compression and we find something add the pointer here - if pointer != -1 { - // We have two bytes (14 bits) to put the pointer in - // if msg == nil, we will never do compression - binary.BigEndian.PutUint16(msg[nameoffset:], uint16(pointer^0xC000)) - off = nameoffset + 1 - goto End - } - if msg != nil && off < len(msg) { - msg[off] = 0 - } -End: - off++ - return off, labels, nil -} - -// Unpack a domain name. -// In addition to the simple sequences of counted strings above, -// domain names are allowed to refer to strings elsewhere in the -// packet, to avoid repeating common suffixes when returning -// many entries in a single domain. The pointers are marked -// by a length byte with the top two bits set. Ignoring those -// two bits, that byte and the next give a 14 bit offset from msg[0] -// where we should pick up the trail. -// Note that if we jump elsewhere in the packet, -// we return off1 == the offset after the first pointer we found, -// which is where the next record will start. -// In theory, the pointers are only allowed to jump backward. -// We let them jump anywhere and stop jumping after a while. - -// UnpackDomainName unpacks a domain name into a string. 
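-// Characters that are special in presentation format are returned in escaped
-// form, and at most 10 compression pointers are followed before the name is
-// rejected as a potential loop.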
-func UnpackDomainName(msg []byte, off int) (string, int, error) { - s := make([]byte, 0, 64) - off1 := 0 - lenmsg := len(msg) - ptr := 0 // number of pointers followed -Loop: - for { - if off >= lenmsg { - return "", lenmsg, ErrBuf - } - c := int(msg[off]) - off++ - switch c & 0xC0 { - case 0x00: - if c == 0x00 { - // end of name - break Loop - } - // literal string - if off+c > lenmsg { - return "", lenmsg, ErrBuf - } - for j := off; j < off+c; j++ { - switch b := msg[j]; b { - case '.', '(', ')', ';', ' ', '@': - fallthrough - case '"', '\\': - s = append(s, '\\', b) - case '\t': - s = append(s, '\\', 't') - case '\r': - s = append(s, '\\', 'r') - default: - if b < 32 || b >= 127 { // unprintable use \DDD - var buf [3]byte - bufs := strconv.AppendInt(buf[:0], int64(b), 10) - s = append(s, '\\') - for i := 0; i < 3-len(bufs); i++ { - s = append(s, '0') - } - for _, r := range bufs { - s = append(s, r) - } - } else { - s = append(s, b) - } - } - } - s = append(s, '.') - off += c - case 0xC0: - // pointer to somewhere else in msg. - // remember location after first ptr, - // since that's how many bytes we consumed. - // also, don't follow too many pointers -- - // maybe there's a loop. - if off >= lenmsg { - return "", lenmsg, ErrBuf - } - c1 := msg[off] - off++ - if ptr == 0 { - off1 = off - } - if ptr++; ptr > 10 { - return "", lenmsg, &Error{err: "too many compression pointers"} - } - off = (c^0xC0)<<8 | int(c1) - default: - // 0x80 and 0x40 are reserved - return "", lenmsg, ErrRdata - } - } - if ptr == 0 { - off1 = off - } - if len(s) == 0 { - s = []byte(".") - } - return string(s), off1, nil -} - -func packTxt(txt []string, msg []byte, offset int, tmp []byte) (int, error) { - if len(txt) == 0 { - if offset >= len(msg) { - return offset, ErrBuf - } - msg[offset] = 0 - return offset, nil - } - var err error - for i := range txt { - if len(txt[i]) > len(tmp) { - return offset, ErrBuf - } - offset, err = packTxtString(txt[i], msg, offset, tmp) - if err != nil { - return offset, err - } - } - return offset, nil -} - -func packTxtString(s string, msg []byte, offset int, tmp []byte) (int, error) { - lenByteOffset := offset - if offset >= len(msg) || len(s) > len(tmp) { - return offset, ErrBuf - } - offset++ - bs := tmp[:len(s)] - copy(bs, s) - for i := 0; i < len(bs); i++ { - if len(msg) <= offset { - return offset, ErrBuf - } - if bs[i] == '\\' { - i++ - if i == len(bs) { - break - } - // check for \DDD - if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) { - msg[offset] = dddToByte(bs[i:]) - i += 2 - } else if bs[i] == 't' { - msg[offset] = '\t' - } else if bs[i] == 'r' { - msg[offset] = '\r' - } else if bs[i] == 'n' { - msg[offset] = '\n' - } else { - msg[offset] = bs[i] - } - } else { - msg[offset] = bs[i] - } - offset++ - } - l := offset - lenByteOffset - 1 - if l > 255 { - return offset, &Error{err: "string exceeded 255 bytes in txt"} - } - msg[lenByteOffset] = byte(l) - return offset, nil -} - -func packOctetString(s string, msg []byte, offset int, tmp []byte) (int, error) { - if offset >= len(msg) || len(s) > len(tmp) { - return offset, ErrBuf - } - bs := tmp[:len(s)] - copy(bs, s) - for i := 0; i < len(bs); i++ { - if len(msg) <= offset { - return offset, ErrBuf - } - if bs[i] == '\\' { - i++ - if i == len(bs) { - break - } - // check for \DDD - if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) { - msg[offset] = dddToByte(bs[i:]) - i += 2 - } else { - msg[offset] = bs[i] - } - } else { - msg[offset] = bs[i] - } - offset++ - } - return 
offset, nil -} - -func unpackTxt(msg []byte, off0 int) (ss []string, off int, err error) { - off = off0 - var s string - for off < len(msg) && err == nil { - s, off, err = unpackTxtString(msg, off) - if err == nil { - ss = append(ss, s) - } - } - return -} - -func unpackTxtString(msg []byte, offset int) (string, int, error) { - if offset+1 > len(msg) { - return "", offset, &Error{err: "overflow unpacking txt"} - } - l := int(msg[offset]) - if offset+l+1 > len(msg) { - return "", offset, &Error{err: "overflow unpacking txt"} - } - s := make([]byte, 0, l) - for _, b := range msg[offset+1 : offset+1+l] { - switch b { - case '"', '\\': - s = append(s, '\\', b) - case '\t': - s = append(s, `\t`...) - case '\r': - s = append(s, `\r`...) - case '\n': - s = append(s, `\n`...) - default: - if b < 32 || b > 127 { // unprintable - var buf [3]byte - bufs := strconv.AppendInt(buf[:0], int64(b), 10) - s = append(s, '\\') - for i := 0; i < 3-len(bufs); i++ { - s = append(s, '0') - } - for _, r := range bufs { - s = append(s, r) - } - } else { - s = append(s, b) - } - } - } - offset += 1 + l - return string(s), offset, nil -} - -// Helpers for dealing with escaped bytes -func isDigit(b byte) bool { return b >= '0' && b <= '9' } - -func dddToByte(s []byte) byte { - return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0')) -} - -// Helper function for packing and unpacking -func intToBytes(i *big.Int, length int) []byte { - buf := i.Bytes() - if len(buf) < length { - b := make([]byte, length) - copy(b[length-len(buf):], buf) - return b - } - return buf -} - -// PackRR packs a resource record rr into msg[off:]. -// See PackDomainName for documentation about the compression. -func PackRR(rr RR, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) { - if rr == nil { - return len(msg), &Error{err: "nil rr"} - } - - off1, err = rr.pack(msg, off, compression, compress) - if err != nil { - return len(msg), err - } - // TODO(miek): Not sure if this is needed? If removed we can remove rawmsg.go as well. - if rawSetRdlength(msg, off, off1) { - return off1, nil - } - return off, ErrRdata -} - -// UnpackRR unpacks msg[off:] into an RR. -func UnpackRR(msg []byte, off int) (rr RR, off1 int, err error) { - h, off, msg, err := unpackHeader(msg, off) - if err != nil { - return nil, len(msg), err - } - end := off + int(h.Rdlength) - - if fn, known := typeToUnpack[h.Rrtype]; !known { - rr, off, err = unpackRFC3597(h, msg, off) - } else { - rr, off, err = fn(h, msg, off) - } - if off != end { - return &h, end, &Error{err: "bad rdlength"} - } - return rr, off, err -} - -// unpackRRslice unpacks msg[off:] into an []RR. 
-// If we cannot unpack the whole array, then it will return nil -func unpackRRslice(l int, msg []byte, off int) (dst1 []RR, off1 int, err error) { - var r RR - // Optimistically make dst be the length that was sent - dst := make([]RR, 0, l) - for i := 0; i < l; i++ { - off1 := off - r, off, err = UnpackRR(msg, off) - if err != nil { - off = len(msg) - break - } - // If offset does not increase anymore, l is a lie - if off1 == off { - l = i - break - } - dst = append(dst, r) - } - if err != nil && off == len(msg) { - dst = nil - } - return dst, off, err -} - -// Convert a MsgHdr to a string, with dig-like headers: -// -//;; opcode: QUERY, status: NOERROR, id: 48404 -// -//;; flags: qr aa rd ra; -func (h *MsgHdr) String() string { - if h == nil { - return " MsgHdr" - } - - s := ";; opcode: " + OpcodeToString[h.Opcode] - s += ", status: " + RcodeToString[h.Rcode] - s += ", id: " + strconv.Itoa(int(h.Id)) + "\n" - - s += ";; flags:" - if h.Response { - s += " qr" - } - if h.Authoritative { - s += " aa" - } - if h.Truncated { - s += " tc" - } - if h.RecursionDesired { - s += " rd" - } - if h.RecursionAvailable { - s += " ra" - } - if h.Zero { // Hmm - s += " z" - } - if h.AuthenticatedData { - s += " ad" - } - if h.CheckingDisabled { - s += " cd" - } - - s += ";" - return s -} - -// Pack packs a Msg: it is converted to to wire format. -// If the dns.Compress is true the message will be in compressed wire format. -func (dns *Msg) Pack() (msg []byte, err error) { - return dns.PackBuffer(nil) -} - -// PackBuffer packs a Msg, using the given buffer buf. If buf is too small -// a new buffer is allocated. -func (dns *Msg) PackBuffer(buf []byte) (msg []byte, err error) { - // We use a similar function in tsig.go's stripTsig. - var ( - dh Header - compression map[string]int - ) - - if dns.Compress { - compression = make(map[string]int) // Compression pointer mappings - } - - if dns.Rcode < 0 || dns.Rcode > 0xFFF { - return nil, ErrRcode - } - if dns.Rcode > 0xF { - // Regular RCODE field is 4 bits - opt := dns.IsEdns0() - if opt == nil { - return nil, ErrExtendedRcode - } - opt.SetExtendedRcode(uint8(dns.Rcode >> 4)) - dns.Rcode &= 0xF - } - - // Convert convenient Msg into wire-like Header. - dh.Id = dns.Id - dh.Bits = uint16(dns.Opcode)<<11 | uint16(dns.Rcode) - if dns.Response { - dh.Bits |= _QR - } - if dns.Authoritative { - dh.Bits |= _AA - } - if dns.Truncated { - dh.Bits |= _TC - } - if dns.RecursionDesired { - dh.Bits |= _RD - } - if dns.RecursionAvailable { - dh.Bits |= _RA - } - if dns.Zero { - dh.Bits |= _Z - } - if dns.AuthenticatedData { - dh.Bits |= _AD - } - if dns.CheckingDisabled { - dh.Bits |= _CD - } - - // Prepare variable sized arrays. - question := dns.Question - answer := dns.Answer - ns := dns.Ns - extra := dns.Extra - - dh.Qdcount = uint16(len(question)) - dh.Ancount = uint16(len(answer)) - dh.Nscount = uint16(len(ns)) - dh.Arcount = uint16(len(extra)) - - // We need the uncompressed length here, because we first pack it and then compress it. - msg = buf - compress := dns.Compress - dns.Compress = false - if packLen := dns.Len() + 1; len(msg) < packLen { - msg = make([]byte, packLen) - } - dns.Compress = compress - - // Pack it in: header and then the pieces. 
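-	// The sections go out in wire order: header, question, answer,
-	// authority (Ns) and additional (Extra).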
- off := 0 - off, err = dh.pack(msg, off, compression, dns.Compress) - if err != nil { - return nil, err - } - for i := 0; i < len(question); i++ { - off, err = question[i].pack(msg, off, compression, dns.Compress) - if err != nil { - return nil, err - } - } - for i := 0; i < len(answer); i++ { - off, err = PackRR(answer[i], msg, off, compression, dns.Compress) - if err != nil { - return nil, err - } - } - for i := 0; i < len(ns); i++ { - off, err = PackRR(ns[i], msg, off, compression, dns.Compress) - if err != nil { - return nil, err - } - } - for i := 0; i < len(extra); i++ { - off, err = PackRR(extra[i], msg, off, compression, dns.Compress) - if err != nil { - return nil, err - } - } - return msg[:off], nil -} - -// Unpack unpacks a binary message to a Msg structure. -func (dns *Msg) Unpack(msg []byte) (err error) { - var ( - dh Header - off int - ) - if dh, off, err = unpackMsgHdr(msg, off); err != nil { - return err - } - if off == len(msg) { - return ErrTruncated - } - - dns.Id = dh.Id - dns.Response = (dh.Bits & _QR) != 0 - dns.Opcode = int(dh.Bits>>11) & 0xF - dns.Authoritative = (dh.Bits & _AA) != 0 - dns.Truncated = (dh.Bits & _TC) != 0 - dns.RecursionDesired = (dh.Bits & _RD) != 0 - dns.RecursionAvailable = (dh.Bits & _RA) != 0 - dns.Zero = (dh.Bits & _Z) != 0 - dns.AuthenticatedData = (dh.Bits & _AD) != 0 - dns.CheckingDisabled = (dh.Bits & _CD) != 0 - dns.Rcode = int(dh.Bits & 0xF) - - // Optimistically use the count given to us in the header - dns.Question = make([]Question, 0, int(dh.Qdcount)) - - for i := 0; i < int(dh.Qdcount); i++ { - off1 := off - var q Question - q, off, err = unpackQuestion(msg, off) - if err != nil { - // Even if Truncated is set, we only will set ErrTruncated if we - // actually got the questions - return err - } - if off1 == off { // Offset does not increase anymore, dh.Qdcount is a lie! - dh.Qdcount = uint16(i) - break - } - dns.Question = append(dns.Question, q) - } - - dns.Answer, off, err = unpackRRslice(int(dh.Ancount), msg, off) - // The header counts might have been wrong so we need to update it - dh.Ancount = uint16(len(dns.Answer)) - if err == nil { - dns.Ns, off, err = unpackRRslice(int(dh.Nscount), msg, off) - } - // The header counts might have been wrong so we need to update it - dh.Nscount = uint16(len(dns.Ns)) - if err == nil { - dns.Extra, off, err = unpackRRslice(int(dh.Arcount), msg, off) - } - // The header counts might have been wrong so we need to update it - dh.Arcount = uint16(len(dns.Extra)) - - if off != len(msg) { - // TODO(miek) make this an error? - // use PackOpt to let people tell how detailed the error reporting should be? - // println("dns: extra bytes in dns packet", off, "<", len(msg)) - } else if dns.Truncated { - // Whether we ran into a an error or not, we want to return that it - // was truncated - err = ErrTruncated - } - return err -} - -// Convert a complete message to a string with dig-like output. 
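-// After the header line the QUESTION, ANSWER, AUTHORITY and ADDITIONAL
-// sections are printed in turn, each under its own ";;" banner.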
-func (dns *Msg) String() string { - if dns == nil { - return " MsgHdr" - } - s := dns.MsgHdr.String() + " " - s += "QUERY: " + strconv.Itoa(len(dns.Question)) + ", " - s += "ANSWER: " + strconv.Itoa(len(dns.Answer)) + ", " - s += "AUTHORITY: " + strconv.Itoa(len(dns.Ns)) + ", " - s += "ADDITIONAL: " + strconv.Itoa(len(dns.Extra)) + "\n" - if len(dns.Question) > 0 { - s += "\n;; QUESTION SECTION:\n" - for i := 0; i < len(dns.Question); i++ { - s += dns.Question[i].String() + "\n" - } - } - if len(dns.Answer) > 0 { - s += "\n;; ANSWER SECTION:\n" - for i := 0; i < len(dns.Answer); i++ { - if dns.Answer[i] != nil { - s += dns.Answer[i].String() + "\n" - } - } - } - if len(dns.Ns) > 0 { - s += "\n;; AUTHORITY SECTION:\n" - for i := 0; i < len(dns.Ns); i++ { - if dns.Ns[i] != nil { - s += dns.Ns[i].String() + "\n" - } - } - } - if len(dns.Extra) > 0 { - s += "\n;; ADDITIONAL SECTION:\n" - for i := 0; i < len(dns.Extra); i++ { - if dns.Extra[i] != nil { - s += dns.Extra[i].String() + "\n" - } - } - } - return s -} - -// Len returns the message length when in (un)compressed wire format. -// If dns.Compress is true compression it is taken into account. Len() -// is provided to be a faster way to get the size of the resulting packet, -// than packing it, measuring the size and discarding the buffer. -func (dns *Msg) Len() int { - // We always return one more than needed. - l := 12 // Message header is always 12 bytes - var compression map[string]int - if dns.Compress { - compression = make(map[string]int) - } - for i := 0; i < len(dns.Question); i++ { - l += dns.Question[i].len() - if dns.Compress { - compressionLenHelper(compression, dns.Question[i].Name) - } - } - for i := 0; i < len(dns.Answer); i++ { - if dns.Answer[i] == nil { - continue - } - l += dns.Answer[i].len() - if dns.Compress { - k, ok := compressionLenSearch(compression, dns.Answer[i].Header().Name) - if ok { - l += 1 - k - } - compressionLenHelper(compression, dns.Answer[i].Header().Name) - k, ok = compressionLenSearchType(compression, dns.Answer[i]) - if ok { - l += 1 - k - } - compressionLenHelperType(compression, dns.Answer[i]) - } - } - for i := 0; i < len(dns.Ns); i++ { - if dns.Ns[i] == nil { - continue - } - l += dns.Ns[i].len() - if dns.Compress { - k, ok := compressionLenSearch(compression, dns.Ns[i].Header().Name) - if ok { - l += 1 - k - } - compressionLenHelper(compression, dns.Ns[i].Header().Name) - k, ok = compressionLenSearchType(compression, dns.Ns[i]) - if ok { - l += 1 - k - } - compressionLenHelperType(compression, dns.Ns[i]) - } - } - for i := 0; i < len(dns.Extra); i++ { - if dns.Extra[i] == nil { - continue - } - l += dns.Extra[i].len() - if dns.Compress { - k, ok := compressionLenSearch(compression, dns.Extra[i].Header().Name) - if ok { - l += 1 - k - } - compressionLenHelper(compression, dns.Extra[i].Header().Name) - k, ok = compressionLenSearchType(compression, dns.Extra[i]) - if ok { - l += 1 - k - } - compressionLenHelperType(compression, dns.Extra[i]) - } - } - return l -} - -// Put the parts of the name in the compression map. -func compressionLenHelper(c map[string]int, s string) { - pref := "" - lbs := Split(s) - for j := len(lbs) - 1; j >= 0; j-- { - pref = s[lbs[j]:] - if _, ok := c[pref]; !ok { - c[pref] = len(pref) - } - } -} - -// Look for each part in the compression map and returns its length, -// keep on searching so we get the longest match. 
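-// For example, with "miek.nl." already stored in the map, a search for
-// "www.miek.nl." strips labels from the left until the stored suffix
-// "miek.nl." is found.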
-func compressionLenSearch(c map[string]int, s string) (int, bool) { - off := 0 - end := false - if s == "" { // don't bork on bogus data - return 0, false - } - for { - if _, ok := c[s[off:]]; ok { - return len(s[off:]), true - } - if end { - break - } - off, end = NextLabel(s, off) - } - return 0, false -} - -// TODO(miek): should add all types, because the all can be *used* for compression. Autogenerate from msg_generate and put in zmsg.go -func compressionLenHelperType(c map[string]int, r RR) { - switch x := r.(type) { - case *NS: - compressionLenHelper(c, x.Ns) - case *MX: - compressionLenHelper(c, x.Mx) - case *CNAME: - compressionLenHelper(c, x.Target) - case *PTR: - compressionLenHelper(c, x.Ptr) - case *SOA: - compressionLenHelper(c, x.Ns) - compressionLenHelper(c, x.Mbox) - case *MB: - compressionLenHelper(c, x.Mb) - case *MG: - compressionLenHelper(c, x.Mg) - case *MR: - compressionLenHelper(c, x.Mr) - case *MF: - compressionLenHelper(c, x.Mf) - case *MD: - compressionLenHelper(c, x.Md) - case *RT: - compressionLenHelper(c, x.Host) - case *RP: - compressionLenHelper(c, x.Mbox) - compressionLenHelper(c, x.Txt) - case *MINFO: - compressionLenHelper(c, x.Rmail) - compressionLenHelper(c, x.Email) - case *AFSDB: - compressionLenHelper(c, x.Hostname) - case *SRV: - compressionLenHelper(c, x.Target) - case *NAPTR: - compressionLenHelper(c, x.Replacement) - case *RRSIG: - compressionLenHelper(c, x.SignerName) - case *NSEC: - compressionLenHelper(c, x.NextDomain) - // HIP? - } -} - -// Only search on compressing these types. -func compressionLenSearchType(c map[string]int, r RR) (int, bool) { - switch x := r.(type) { - case *NS: - return compressionLenSearch(c, x.Ns) - case *MX: - return compressionLenSearch(c, x.Mx) - case *CNAME: - return compressionLenSearch(c, x.Target) - case *DNAME: - return compressionLenSearch(c, x.Target) - case *PTR: - return compressionLenSearch(c, x.Ptr) - case *SOA: - k, ok := compressionLenSearch(c, x.Ns) - k1, ok1 := compressionLenSearch(c, x.Mbox) - if !ok && !ok1 { - return 0, false - } - return k + k1, true - case *MB: - return compressionLenSearch(c, x.Mb) - case *MG: - return compressionLenSearch(c, x.Mg) - case *MR: - return compressionLenSearch(c, x.Mr) - case *MF: - return compressionLenSearch(c, x.Mf) - case *MD: - return compressionLenSearch(c, x.Md) - case *RT: - return compressionLenSearch(c, x.Host) - case *MINFO: - k, ok := compressionLenSearch(c, x.Rmail) - k1, ok1 := compressionLenSearch(c, x.Email) - if !ok && !ok1 { - return 0, false - } - return k + k1, true - case *AFSDB: - return compressionLenSearch(c, x.Hostname) - } - return 0, false -} - -// Copy returns a new RR which is a deep-copy of r. -func Copy(r RR) RR { r1 := r.copy(); return r1 } - -// Len returns the length (in octets) of the uncompressed RR in wire format. -func Len(r RR) int { return r.len() } - -// Copy returns a new *Msg which is a deep-copy of dns. -func (dns *Msg) Copy() *Msg { return dns.CopyTo(new(Msg)) } - -// CopyTo copies the contents to the provided message using a deep-copy and returns the copy. 
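-// A minimal usage sketch (req is an assumed, fully populated *Msg):
-//
-//	reply := req.CopyTo(new(Msg))
-//
-// Mutating reply's answer RRs afterwards leaves req untouched.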
-func (dns *Msg) CopyTo(r1 *Msg) *Msg { - r1.MsgHdr = dns.MsgHdr - r1.Compress = dns.Compress - - if len(dns.Question) > 0 { - r1.Question = make([]Question, len(dns.Question)) - copy(r1.Question, dns.Question) // TODO(miek): Question is an immutable value, ok to do a shallow-copy - } - - rrArr := make([]RR, len(dns.Answer)+len(dns.Ns)+len(dns.Extra)) - var rri int - - if len(dns.Answer) > 0 { - rrbegin := rri - for i := 0; i < len(dns.Answer); i++ { - rrArr[rri] = dns.Answer[i].copy() - rri++ - } - r1.Answer = rrArr[rrbegin:rri:rri] - } - - if len(dns.Ns) > 0 { - rrbegin := rri - for i := 0; i < len(dns.Ns); i++ { - rrArr[rri] = dns.Ns[i].copy() - rri++ - } - r1.Ns = rrArr[rrbegin:rri:rri] - } - - if len(dns.Extra) > 0 { - rrbegin := rri - for i := 0; i < len(dns.Extra); i++ { - rrArr[rri] = dns.Extra[i].copy() - rri++ - } - r1.Extra = rrArr[rrbegin:rri:rri] - } - - return r1 -} - -func (q *Question) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := PackDomainName(q.Name, msg, off, compression, compress) - if err != nil { - return off, err - } - off, err = packUint16(q.Qtype, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(q.Qclass, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func unpackQuestion(msg []byte, off int) (Question, int, error) { - var ( - q Question - err error - ) - q.Name, off, err = UnpackDomainName(msg, off) - if err != nil { - return q, off, err - } - if off == len(msg) { - return q, off, nil - } - q.Qtype, off, err = unpackUint16(msg, off) - if err != nil { - return q, off, err - } - if off == len(msg) { - return q, off, nil - } - q.Qclass, off, err = unpackUint16(msg, off) - if off == len(msg) { - return q, off, nil - } - return q, off, err -} - -func (dh *Header) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := packUint16(dh.Id, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(dh.Bits, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(dh.Qdcount, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(dh.Ancount, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(dh.Nscount, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(dh.Arcount, msg, off) - return off, err -} - -func unpackMsgHdr(msg []byte, off int) (Header, int, error) { - var ( - dh Header - err error - ) - dh.Id, off, err = unpackUint16(msg, off) - if err != nil { - return dh, off, err - } - dh.Bits, off, err = unpackUint16(msg, off) - if err != nil { - return dh, off, err - } - dh.Qdcount, off, err = unpackUint16(msg, off) - if err != nil { - return dh, off, err - } - dh.Ancount, off, err = unpackUint16(msg, off) - if err != nil { - return dh, off, err - } - dh.Nscount, off, err = unpackUint16(msg, off) - if err != nil { - return dh, off, err - } - dh.Arcount, off, err = unpackUint16(msg, off) - return dh, off, err -} diff --git a/vendor/github.com/miekg/dns/msg_helpers.go b/vendor/github.com/miekg/dns/msg_helpers.go deleted file mode 100644 index e7a9500c..00000000 --- a/vendor/github.com/miekg/dns/msg_helpers.go +++ /dev/null @@ -1,630 +0,0 @@ -package dns - -import ( - "encoding/base32" - "encoding/base64" - "encoding/binary" - "encoding/hex" - "net" - "strconv" -) - -// helper functions called from the generated zmsg.go - -// These function are named after the tag to help pack/unpack, if there is no tag it is the name -// of the type they 
pack/unpack (string, int, etc). We prefix all with unpackData or packData, so packDataA or -// packDataDomainName. - -func unpackDataA(msg []byte, off int) (net.IP, int, error) { - if off+net.IPv4len > len(msg) { - return nil, len(msg), &Error{err: "overflow unpacking a"} - } - a := append(make(net.IP, 0, net.IPv4len), msg[off:off+net.IPv4len]...) - off += net.IPv4len - return a, off, nil -} - -func packDataA(a net.IP, msg []byte, off int) (int, error) { - // It must be a slice of 4, even if it is 16, we encode only the first 4 - if off+net.IPv4len > len(msg) { - return len(msg), &Error{err: "overflow packing a"} - } - switch len(a) { - case net.IPv4len, net.IPv6len: - copy(msg[off:], a.To4()) - off += net.IPv4len - case 0: - // Allowed, for dynamic updates. - default: - return len(msg), &Error{err: "overflow packing a"} - } - return off, nil -} - -func unpackDataAAAA(msg []byte, off int) (net.IP, int, error) { - if off+net.IPv6len > len(msg) { - return nil, len(msg), &Error{err: "overflow unpacking aaaa"} - } - aaaa := append(make(net.IP, 0, net.IPv6len), msg[off:off+net.IPv6len]...) - off += net.IPv6len - return aaaa, off, nil -} - -func packDataAAAA(aaaa net.IP, msg []byte, off int) (int, error) { - if off+net.IPv6len > len(msg) { - return len(msg), &Error{err: "overflow packing aaaa"} - } - - switch len(aaaa) { - case net.IPv6len: - copy(msg[off:], aaaa) - off += net.IPv6len - case 0: - // Allowed, dynamic updates. - default: - return len(msg), &Error{err: "overflow packing aaaa"} - } - return off, nil -} - -// unpackHeader unpacks an RR header, returning the offset to the end of the header and a -// re-sliced msg according to the expected length of the RR. -func unpackHeader(msg []byte, off int) (rr RR_Header, off1 int, truncmsg []byte, err error) { - hdr := RR_Header{} - if off == len(msg) { - return hdr, off, msg, nil - } - - hdr.Name, off, err = UnpackDomainName(msg, off) - if err != nil { - return hdr, len(msg), msg, err - } - hdr.Rrtype, off, err = unpackUint16(msg, off) - if err != nil { - return hdr, len(msg), msg, err - } - hdr.Class, off, err = unpackUint16(msg, off) - if err != nil { - return hdr, len(msg), msg, err - } - hdr.Ttl, off, err = unpackUint32(msg, off) - if err != nil { - return hdr, len(msg), msg, err - } - hdr.Rdlength, off, err = unpackUint16(msg, off) - if err != nil { - return hdr, len(msg), msg, err - } - msg, err = truncateMsgFromRdlength(msg, off, hdr.Rdlength) - return hdr, off, msg, nil -} - -// pack packs an RR header, returning the offset to the end of the header. -// See PackDomainName for documentation about the compression. -func (hdr RR_Header) pack(msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) { - if off == len(msg) { - return off, nil - } - - off, err = PackDomainName(hdr.Name, msg, off, compression, compress) - if err != nil { - return len(msg), err - } - off, err = packUint16(hdr.Rrtype, msg, off) - if err != nil { - return len(msg), err - } - off, err = packUint16(hdr.Class, msg, off) - if err != nil { - return len(msg), err - } - off, err = packUint32(hdr.Ttl, msg, off) - if err != nil { - return len(msg), err - } - off, err = packUint16(hdr.Rdlength, msg, off) - if err != nil { - return len(msg), err - } - return off, nil -} - -// helper helper functions. - -// truncateMsgFromRdLength truncates msg to match the expected length of the RR. -// Returns an error if msg is smaller than the expected size. 
-func truncateMsgFromRdlength(msg []byte, off int, rdlength uint16) (truncmsg []byte, err error) { - lenrd := off + int(rdlength) - if lenrd > len(msg) { - return msg, &Error{err: "overflowing header size"} - } - return msg[:lenrd], nil -} - -func fromBase32(s []byte) (buf []byte, err error) { - buflen := base32.HexEncoding.DecodedLen(len(s)) - buf = make([]byte, buflen) - n, err := base32.HexEncoding.Decode(buf, s) - buf = buf[:n] - return -} - -func toBase32(b []byte) string { return base32.HexEncoding.EncodeToString(b) } - -func fromBase64(s []byte) (buf []byte, err error) { - buflen := base64.StdEncoding.DecodedLen(len(s)) - buf = make([]byte, buflen) - n, err := base64.StdEncoding.Decode(buf, s) - buf = buf[:n] - return -} - -func toBase64(b []byte) string { return base64.StdEncoding.EncodeToString(b) } - -// dynamicUpdate returns true if the Rdlength is zero. -func noRdata(h RR_Header) bool { return h.Rdlength == 0 } - -func unpackUint8(msg []byte, off int) (i uint8, off1 int, err error) { - if off+1 > len(msg) { - return 0, len(msg), &Error{err: "overflow unpacking uint8"} - } - return uint8(msg[off]), off + 1, nil -} - -func packUint8(i uint8, msg []byte, off int) (off1 int, err error) { - if off+1 > len(msg) { - return len(msg), &Error{err: "overflow packing uint8"} - } - msg[off] = byte(i) - return off + 1, nil -} - -func unpackUint16(msg []byte, off int) (i uint16, off1 int, err error) { - if off+2 > len(msg) { - return 0, len(msg), &Error{err: "overflow unpacking uint16"} - } - return binary.BigEndian.Uint16(msg[off:]), off + 2, nil -} - -func packUint16(i uint16, msg []byte, off int) (off1 int, err error) { - if off+2 > len(msg) { - return len(msg), &Error{err: "overflow packing uint16"} - } - binary.BigEndian.PutUint16(msg[off:], i) - return off + 2, nil -} - -func unpackUint32(msg []byte, off int) (i uint32, off1 int, err error) { - if off+4 > len(msg) { - return 0, len(msg), &Error{err: "overflow unpacking uint32"} - } - return binary.BigEndian.Uint32(msg[off:]), off + 4, nil -} - -func packUint32(i uint32, msg []byte, off int) (off1 int, err error) { - if off+4 > len(msg) { - return len(msg), &Error{err: "overflow packing uint32"} - } - binary.BigEndian.PutUint32(msg[off:], i) - return off + 4, nil -} - -func unpackUint48(msg []byte, off int) (i uint64, off1 int, err error) { - if off+6 > len(msg) { - return 0, len(msg), &Error{err: "overflow unpacking uint64 as uint48"} - } - // Used in TSIG where the last 48 bits are occupied, so for now, assume a uint48 (6 bytes) - i = (uint64(uint64(msg[off])<<40 | uint64(msg[off+1])<<32 | uint64(msg[off+2])<<24 | uint64(msg[off+3])<<16 | - uint64(msg[off+4])<<8 | uint64(msg[off+5]))) - off += 6 - return i, off, nil -} - -func packUint48(i uint64, msg []byte, off int) (off1 int, err error) { - if off+6 > len(msg) { - return len(msg), &Error{err: "overflow packing uint64 as uint48"} - } - msg[off] = byte(i >> 40) - msg[off+1] = byte(i >> 32) - msg[off+2] = byte(i >> 24) - msg[off+3] = byte(i >> 16) - msg[off+4] = byte(i >> 8) - msg[off+5] = byte(i) - off += 6 - return off, nil -} - -func unpackUint64(msg []byte, off int) (i uint64, off1 int, err error) { - if off+8 > len(msg) { - return 0, len(msg), &Error{err: "overflow unpacking uint64"} - } - return binary.BigEndian.Uint64(msg[off:]), off + 8, nil -} - -func packUint64(i uint64, msg []byte, off int) (off1 int, err error) { - if off+8 > len(msg) { - return len(msg), &Error{err: "overflow packing uint64"} - } - binary.BigEndian.PutUint64(msg[off:], i) - off += 8 - return off, nil -} - 
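-// A minimal round-trip sketch for the fixed-width helpers above (the buffer
-// and value are invented for illustration):
-//
-//	buf := make([]byte, 2)
-//	if _, err := packUint16(0xBEEF, buf, 0); err == nil { // stored big-endian
-//		v, _, _ := unpackUint16(buf, 0) // v == 0xBEEF
-//	}
-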
-func unpackString(msg []byte, off int) (string, int, error) { - if off+1 > len(msg) { - return "", off, &Error{err: "overflow unpacking txt"} - } - l := int(msg[off]) - if off+l+1 > len(msg) { - return "", off, &Error{err: "overflow unpacking txt"} - } - s := make([]byte, 0, l) - for _, b := range msg[off+1 : off+1+l] { - switch b { - case '"', '\\': - s = append(s, '\\', b) - case '\t', '\r', '\n': - s = append(s, b) - default: - if b < 32 || b > 127 { // unprintable - var buf [3]byte - bufs := strconv.AppendInt(buf[:0], int64(b), 10) - s = append(s, '\\') - for i := 0; i < 3-len(bufs); i++ { - s = append(s, '0') - } - for _, r := range bufs { - s = append(s, r) - } - } else { - s = append(s, b) - } - } - } - off += 1 + l - return string(s), off, nil -} - -func packString(s string, msg []byte, off int) (int, error) { - txtTmp := make([]byte, 256*4+1) - off, err := packTxtString(s, msg, off, txtTmp) - if err != nil { - return len(msg), err - } - return off, nil -} - -func unpackStringBase32(msg []byte, off, end int) (string, int, error) { - if end > len(msg) { - return "", len(msg), &Error{err: "overflow unpacking base32"} - } - s := toBase32(msg[off:end]) - return s, end, nil -} - -func packStringBase32(s string, msg []byte, off int) (int, error) { - b32, err := fromBase32([]byte(s)) - if err != nil { - return len(msg), err - } - if off+len(b32) > len(msg) { - return len(msg), &Error{err: "overflow packing base32"} - } - copy(msg[off:off+len(b32)], b32) - off += len(b32) - return off, nil -} - -func unpackStringBase64(msg []byte, off, end int) (string, int, error) { - // Rest of the RR is base64 encoded value, so we don't need an explicit length - // to be set. Thus far all RR's that have base64 encoded fields have those as their - // last one. What we do need is the end of the RR! - if end > len(msg) { - return "", len(msg), &Error{err: "overflow unpacking base64"} - } - s := toBase64(msg[off:end]) - return s, end, nil -} - -func packStringBase64(s string, msg []byte, off int) (int, error) { - b64, err := fromBase64([]byte(s)) - if err != nil { - return len(msg), err - } - if off+len(b64) > len(msg) { - return len(msg), &Error{err: "overflow packing base64"} - } - copy(msg[off:off+len(b64)], b64) - off += len(b64) - return off, nil -} - -func unpackStringHex(msg []byte, off, end int) (string, int, error) { - // Rest of the RR is hex encoded value, so we don't need an explicit length - // to be set. NSEC and TSIG have hex fields with a length field. - // What we do need is the end of the RR! - if end > len(msg) { - return "", len(msg), &Error{err: "overflow unpacking hex"} - } - - s := hex.EncodeToString(msg[off:end]) - return s, end, nil -} - -func packStringHex(s string, msg []byte, off int) (int, error) { - h, err := hex.DecodeString(s) - if err != nil { - return len(msg), err - } - if off+(len(h)) > len(msg) { - return len(msg), &Error{err: "overflow packing hex"} - } - copy(msg[off:off+len(h)], h) - off += len(h) - return off, nil -} - -func unpackStringTxt(msg []byte, off int) ([]string, int, error) { - txt, off, err := unpackTxt(msg, off) - if err != nil { - return nil, len(msg), err - } - return txt, off, nil -} - -func packStringTxt(s []string, msg []byte, off int) (int, error) { - txtTmp := make([]byte, 256*4+1) // If the whole string consists out of \DDD we need this many. 
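-	// (A 255-octet wire string may arrive with every byte escaped as a
-	// four-character \DDD sequence, hence the 256*4+1 headroom.)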
- off, err := packTxt(s, msg, off, txtTmp) - if err != nil { - return len(msg), err - } - return off, nil -} - -func unpackDataOpt(msg []byte, off int) ([]EDNS0, int, error) { - var edns []EDNS0 -Option: - code := uint16(0) - if off+4 > len(msg) { - return nil, len(msg), &Error{err: "overflow unpacking opt"} - } - code = binary.BigEndian.Uint16(msg[off:]) - off += 2 - optlen := binary.BigEndian.Uint16(msg[off:]) - off += 2 - if off+int(optlen) > len(msg) { - return nil, len(msg), &Error{err: "overflow unpacking opt"} - } - switch code { - case EDNS0NSID: - e := new(EDNS0_NSID) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) - case EDNS0SUBNET, EDNS0SUBNETDRAFT: - e := new(EDNS0_SUBNET) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) - if code == EDNS0SUBNETDRAFT { - e.DraftOption = true - } - case EDNS0COOKIE: - e := new(EDNS0_COOKIE) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) - case EDNS0UL: - e := new(EDNS0_UL) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) - case EDNS0LLQ: - e := new(EDNS0_LLQ) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) - case EDNS0DAU: - e := new(EDNS0_DAU) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) - case EDNS0DHU: - e := new(EDNS0_DHU) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) - case EDNS0N3U: - e := new(EDNS0_N3U) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) - default: - e := new(EDNS0_LOCAL) - e.Code = code - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) - } - - if off < len(msg) { - goto Option - } - - return edns, off, nil -} - -func packDataOpt(options []EDNS0, msg []byte, off int) (int, error) { - for _, el := range options { - b, err := el.pack() - if err != nil || off+3 > len(msg) { - return len(msg), &Error{err: "overflow packing opt"} - } - binary.BigEndian.PutUint16(msg[off:], el.Option()) // Option code - binary.BigEndian.PutUint16(msg[off+2:], uint16(len(b))) // Length - off += 4 - if off+len(b) > len(msg) { - copy(msg[off:], b) - off = len(msg) - continue - } - // Actual data - copy(msg[off:off+len(b)], b) - off += len(b) - } - return off, nil -} - -func unpackStringOctet(msg []byte, off int) (string, int, error) { - s := string(msg[off:]) - return s, len(msg), nil -} - -func packStringOctet(s string, msg []byte, off int) (int, error) { - txtTmp := make([]byte, 256*4+1) - off, err := packOctetString(s, msg, off, txtTmp) - if err != nil { - return len(msg), err - } - return off, nil -} - -func unpackDataNsec(msg []byte, off int) ([]uint16, int, error) { - var nsec []uint16 - length, window, lastwindow := 0, 0, -1 - for off < len(msg) { - if off+2 > len(msg) { - return nsec, len(msg), &Error{err: "overflow unpacking nsecx"} - } - window = int(msg[off]) - length = int(msg[off+1]) - off += 2 - if window <= 
lastwindow { - // RFC 4034: Blocks are present in the NSEC RR RDATA in - // increasing numerical order. - return nsec, len(msg), &Error{err: "out of order NSEC block"} - } - if length == 0 { - // RFC 4034: Blocks with no types present MUST NOT be included. - return nsec, len(msg), &Error{err: "empty NSEC block"} - } - if length > 32 { - return nsec, len(msg), &Error{err: "NSEC block too long"} - } - if off+length > len(msg) { - return nsec, len(msg), &Error{err: "overflowing NSEC block"} - } - - // Walk the bytes in the window and extract the type bits - for j := 0; j < length; j++ { - b := msg[off+j] - // Check the bits one by one, and set the type - if b&0x80 == 0x80 { - nsec = append(nsec, uint16(window*256+j*8+0)) - } - if b&0x40 == 0x40 { - nsec = append(nsec, uint16(window*256+j*8+1)) - } - if b&0x20 == 0x20 { - nsec = append(nsec, uint16(window*256+j*8+2)) - } - if b&0x10 == 0x10 { - nsec = append(nsec, uint16(window*256+j*8+3)) - } - if b&0x8 == 0x8 { - nsec = append(nsec, uint16(window*256+j*8+4)) - } - if b&0x4 == 0x4 { - nsec = append(nsec, uint16(window*256+j*8+5)) - } - if b&0x2 == 0x2 { - nsec = append(nsec, uint16(window*256+j*8+6)) - } - if b&0x1 == 0x1 { - nsec = append(nsec, uint16(window*256+j*8+7)) - } - } - off += length - lastwindow = window - } - return nsec, off, nil -} - -func packDataNsec(bitmap []uint16, msg []byte, off int) (int, error) { - if len(bitmap) == 0 { - return off, nil - } - var lastwindow, lastlength uint16 - for j := 0; j < len(bitmap); j++ { - t := bitmap[j] - window := t / 256 - length := (t-window*256)/8 + 1 - if window > lastwindow && lastlength != 0 { // New window, jump to the new offset - off += int(lastlength) + 2 - lastlength = 0 - } - if window < lastwindow || length < lastlength { - return len(msg), &Error{err: "nsec bits out of order"} - } - if off+2+int(length) > len(msg) { - return len(msg), &Error{err: "overflow packing nsec"} - } - // Setting the window # - msg[off] = byte(window) - // Setting the octets length - msg[off+1] = byte(length) - // Setting the bit value for the type in the right octet - msg[off+1+int(length)] |= byte(1 << (7 - (t % 8))) - lastwindow, lastlength = window, length - } - off += int(lastlength) + 2 - return off, nil -} - -func unpackDataDomainNames(msg []byte, off, end int) ([]string, int, error) { - var ( - servers []string - s string - err error - ) - if end > len(msg) { - return nil, len(msg), &Error{err: "overflow unpacking domain names"} - } - for off < end { - s, off, err = UnpackDomainName(msg, off) - if err != nil { - return servers, len(msg), err - } - servers = append(servers, s) - } - return servers, off, nil -} - -func packDataDomainNames(names []string, msg []byte, off int, compression map[string]int, compress bool) (int, error) { - var err error - for j := 0; j < len(names); j++ { - off, err = PackDomainName(names[j], msg, off, compression, false && compress) - if err != nil { - return len(msg), err - } - } - return off, nil -} diff --git a/vendor/github.com/miekg/dns/nsecx.go b/vendor/github.com/miekg/dns/nsecx.go deleted file mode 100644 index 6f10f3e6..00000000 --- a/vendor/github.com/miekg/dns/nsecx.go +++ /dev/null @@ -1,119 +0,0 @@ -package dns - -import ( - "crypto/sha1" - "hash" - "io" - "strings" -) - -type saltWireFmt struct { - Salt string `dns:"size-hex"` -} - -// HashName hashes a string (label) according to RFC 5155. It returns the hashed string in uppercase. 
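-// The result is base32hex encoded (the form used for NSEC3 owner labels);
-// an empty string signals an unsupported hash algorithm or a packing failure.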
-func HashName(label string, ha uint8, iter uint16, salt string) string {
-	saltwire := new(saltWireFmt)
-	saltwire.Salt = salt
-	wire := make([]byte, DefaultMsgSize)
-	n, err := packSaltWire(saltwire, wire)
-	if err != nil {
-		return ""
-	}
-	wire = wire[:n]
-	name := make([]byte, 255)
-	off, err := PackDomainName(strings.ToLower(label), name, 0, nil, false)
-	if err != nil {
-		return ""
-	}
-	name = name[:off]
-	var s hash.Hash
-	switch ha {
-	case SHA1:
-		s = sha1.New()
-	default:
-		return ""
-	}
-
-	// k = 0
-	name = append(name, wire...)
-	io.WriteString(s, string(name))
-	nsec3 := s.Sum(nil)
-	// k > 0
-	for k := uint16(0); k < iter; k++ {
-		s.Reset()
-		nsec3 = append(nsec3, wire...)
-		io.WriteString(s, string(nsec3))
-		nsec3 = s.Sum(nil)
-	}
-	return toBase32(nsec3)
-}
-
-// Denialer is an interface that should be implemented by types that are used to deny
-// answers in DNSSEC.
-type Denialer interface {
-	// Cover will check if the (unhashed) name is being covered by this NSEC or NSEC3.
-	Cover(name string) bool
-	// Match will check if the ownername matches the (unhashed) name for this NSEC or NSEC3.
-	Match(name string) bool
-}
-
-// Cover implements the Denialer interface.
-func (rr *NSEC) Cover(name string) bool {
-	return true
-}
-
-// Match implements the Denialer interface.
-func (rr *NSEC) Match(name string) bool {
-	return true
-}
-
-// Cover implements the Denialer interface.
-func (rr *NSEC3) Cover(name string) bool {
-	// FIXME(miek): check if the zones match
-	// FIXME(miek): check if we're not dealing with parent nsec3
-	hname := HashName(name, rr.Hash, rr.Iterations, rr.Salt)
-	labels := Split(rr.Hdr.Name)
-	if len(labels) < 2 {
-		return false
-	}
-	hash := strings.ToUpper(rr.Hdr.Name[labels[0] : labels[1]-1]) // -1 to remove the dot
-	if hash == rr.NextDomain {
-		return false // empty interval
-	}
-	if hash > rr.NextDomain { // last name, points to apex
-		// hname > hash
-		// hname > rr.NextDomain
-		// TODO(miek)
-	}
-	if hname <= hash {
-		return false
-	}
-	if hname >= rr.NextDomain {
-		return false
-	}
-	return true
-}
-
-// Match implements the Denialer interface.
-func (rr *NSEC3) Match(name string) bool {
-	// FIXME(miek): Check if we are in the same zone
-	hname := HashName(name, rr.Hash, rr.Iterations, rr.Salt)
-	labels := Split(rr.Hdr.Name)
-	if len(labels) < 2 {
-		return false
-	}
-	hash := strings.ToUpper(rr.Hdr.Name[labels[0] : labels[1]-1]) // -1 to remove the dot
-	if hash == hname {
-		return true
-	}
-	return false
-}
-
-func packSaltWire(sw *saltWireFmt, msg []byte) (int, error) {
-	off, err := packStringHex(sw.Salt, msg, 0)
-	if err != nil {
-		return off, err
-	}
-	return off, nil
-}
diff --git a/vendor/github.com/miekg/dns/privaterr.go b/vendor/github.com/miekg/dns/privaterr.go
deleted file mode 100644
index 6b08e6e9..00000000
--- a/vendor/github.com/miekg/dns/privaterr.go
+++ /dev/null
@@ -1,149 +0,0 @@
-package dns
-
-import (
-	"fmt"
-	"strings"
-)
-
-// PrivateRdata is an interface used for implementing "Private Use" RR types, see
-// RFC 6895. This allows one to experiment with new RR types without requesting an
-// official type code. Also see dns.PrivateHandle and dns.PrivateHandleRemove.
-type PrivateRdata interface {
-	// String returns the text presentation of the Rdata of the Private RR.
-	String() string
-	// Parse parses the Rdata of the private RR.
-	Parse([]string) error
-	// Pack is used when packing a private RR into a buffer.
-	Pack([]byte) (int, error)
-	// Unpack is used when unpacking a private RR from a buffer.
-	// TODO(miek): different signature than Pack, see edns0.go for instance.
-	Unpack([]byte) (int, error)
-	// Copy copies the Rdata.
-	Copy(PrivateRdata) error
-	// Len returns the length in octets of the Rdata.
-	Len() int
-}
-
-// PrivateRR represents an RR that uses a PrivateRdata user-defined type.
-// It mocks normal RRs and implements the dns.RR interface.
-type PrivateRR struct {
-	Hdr  RR_Header
-	Data PrivateRdata
-}
-
-func mkPrivateRR(rrtype uint16) *PrivateRR {
-	// Panics if RR is not an instance of PrivateRR.
-	rrfunc, ok := TypeToRR[rrtype]
-	if !ok {
-		panic(fmt.Sprintf("dns: invalid operation with Private RR type %d", rrtype))
-	}
-
-	anyrr := rrfunc()
-	switch rr := anyrr.(type) {
-	case *PrivateRR:
-		return rr
-	}
-	panic(fmt.Sprintf("dns: RR is not a PrivateRR, TypeToRR[%d] generator returned %T", rrtype, anyrr))
-}
-
-// Header returns the RR header of r.
-func (r *PrivateRR) Header() *RR_Header { return &r.Hdr }
-
-func (r *PrivateRR) String() string { return r.Hdr.String() + r.Data.String() }
-
-// Private len and copy parts to satisfy the RR interface.
-func (r *PrivateRR) len() int { return r.Hdr.len() + r.Data.Len() }
-func (r *PrivateRR) copy() RR {
-	// make new RR like this:
-	rr := mkPrivateRR(r.Hdr.Rrtype)
-	newh := r.Hdr.copyHeader()
-	rr.Hdr = *newh
-
-	err := r.Data.Copy(rr.Data)
-	if err != nil {
-		panic("dns: got value that could not be used to copy Private rdata")
-	}
-	return rr
-}
-func (r *PrivateRR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
-	off, err := r.Hdr.pack(msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	headerEnd := off
-	n, err := r.Data.Pack(msg[off:])
-	if err != nil {
-		return len(msg), err
-	}
-	off += n
-	r.Header().Rdlength = uint16(off - headerEnd)
-	return off, nil
-}
-
-// PrivateHandle registers a private resource record type. It requires
-// string and numeric representations of the private RR type and a generator
-// function as arguments.
-func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata) {
-	rtypestr = strings.ToUpper(rtypestr)
-
-	TypeToRR[rtype] = func() RR { return &PrivateRR{RR_Header{}, generator()} }
-	TypeToString[rtype] = rtypestr
-	StringToType[rtypestr] = rtype
-
-	typeToUnpack[rtype] = func(h RR_Header, msg []byte, off int) (RR, int, error) {
-		if noRdata(h) {
-			return &h, off, nil
-		}
-		var err error
-
-		rr := mkPrivateRR(h.Rrtype)
-		rr.Hdr = h
-
-		off1, err := rr.Data.Unpack(msg[off:])
-		off += off1
-		if err != nil {
-			return rr, off, err
-		}
-		return rr, off, err
-	}
-
-	setPrivateRR := func(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
-		rr := mkPrivateRR(h.Rrtype)
-		rr.Hdr = h
-
-		var l lex
-		text := make([]string, 0, 2) // could be 0..N elements, median is probably 1
-	Fetch:
-		for {
-			// TODO(miek): we could also be returning _QUOTE, this might or might not
-			// be an issue (basically parsing TXT becomes hard)
-			switch l = <-c; l.value {
-			case zNewline, zEOF:
-				break Fetch
-			case zString:
-				text = append(text, l.token)
-			}
-		}
-
-		err := rr.Data.Parse(text)
-		if err != nil {
-			return nil, &ParseError{f, err.Error(), l}, ""
-		}
-
-		return rr, nil, ""
-	}
-
-	typeToparserFunc[rtype] = parserFunc{setPrivateRR, true}
-}
-
-// PrivateHandleRemove removes the definitions required to support a private RR type.
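-// A minimal registration sketch (EXAMPLERR, 0xFF01 and exampleRdata are
-// invented names; 0xFF01 lies in the RFC 6895 private-use range):
-//
-//	PrivateHandle("EXAMPLERR", 0xFF01, func() PrivateRdata { return new(exampleRdata) })
-//	// ... parse, pack and unpack EXAMPLERR records ...
-//	PrivateHandleRemove(0xFF01)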
-func PrivateHandleRemove(rtype uint16) { - rtypestr, ok := TypeToString[rtype] - if ok { - delete(TypeToRR, rtype) - delete(TypeToString, rtype) - delete(typeToparserFunc, rtype) - delete(StringToType, rtypestr) - delete(typeToUnpack, rtype) - } - return -} diff --git a/vendor/github.com/miekg/dns/rawmsg.go b/vendor/github.com/miekg/dns/rawmsg.go deleted file mode 100644 index 6e21fba7..00000000 --- a/vendor/github.com/miekg/dns/rawmsg.go +++ /dev/null @@ -1,49 +0,0 @@ -package dns - -import "encoding/binary" - -// rawSetRdlength sets the rdlength in the header of -// the RR. The offset 'off' must be positioned at the -// start of the header of the RR, 'end' must be the -// end of the RR. -func rawSetRdlength(msg []byte, off, end int) bool { - l := len(msg) -Loop: - for { - if off+1 > l { - return false - } - c := int(msg[off]) - off++ - switch c & 0xC0 { - case 0x00: - if c == 0x00 { - // End of the domainname - break Loop - } - if off+c > l { - return false - } - off += c - - case 0xC0: - // pointer, next byte included, ends domainname - off++ - break Loop - } - } - // The domainname has been seen, we at the start of the fixed part in the header. - // Type is 2 bytes, class is 2 bytes, ttl 4 and then 2 bytes for the length. - off += 2 + 2 + 4 - if off+2 > l { - return false - } - //off+1 is the end of the header, 'end' is the end of the rr - //so 'end' - 'off+2' is the length of the rdata - rdatalen := end - (off + 2) - if rdatalen > 0xFFFF { - return false - } - binary.BigEndian.PutUint16(msg[off:], uint16(rdatalen)) - return true -} diff --git a/vendor/github.com/miekg/dns/reverse.go b/vendor/github.com/miekg/dns/reverse.go deleted file mode 100644 index 099dac94..00000000 --- a/vendor/github.com/miekg/dns/reverse.go +++ /dev/null @@ -1,38 +0,0 @@ -package dns - -// StringToType is the reverse of TypeToString, needed for string parsing. -var StringToType = reverseInt16(TypeToString) - -// StringToClass is the reverse of ClassToString, needed for string parsing. -var StringToClass = reverseInt16(ClassToString) - -// Map of opcodes strings. -var StringToOpcode = reverseInt(OpcodeToString) - -// Map of rcodes strings. -var StringToRcode = reverseInt(RcodeToString) - -// Reverse a map -func reverseInt8(m map[uint8]string) map[string]uint8 { - n := make(map[string]uint8, len(m)) - for u, s := range m { - n[s] = u - } - return n -} - -func reverseInt16(m map[uint16]string) map[string]uint16 { - n := make(map[string]uint16, len(m)) - for u, s := range m { - n[s] = u - } - return n -} - -func reverseInt(m map[int]string) map[string]int { - n := make(map[string]int, len(m)) - for u, s := range m { - n[s] = u - } - return n -} diff --git a/vendor/github.com/miekg/dns/sanitize.go b/vendor/github.com/miekg/dns/sanitize.go deleted file mode 100644 index b489f3f0..00000000 --- a/vendor/github.com/miekg/dns/sanitize.go +++ /dev/null @@ -1,84 +0,0 @@ -package dns - -// Dedup removes identical RRs from rrs. It preserves the original ordering. -// The lowest TTL of any duplicates is used in the remaining one. Dedup modifies -// rrs. -// m is used to store the RRs temporay. If it is nil a new map will be allocated. -func Dedup(rrs []RR, m map[string]RR) []RR { - if m == nil { - m = make(map[string]RR) - } - // Save the keys, so we don't have to call normalizedString twice. - keys := make([]*string, 0, len(rrs)) - - for _, r := range rrs { - key := normalizedString(r) - keys = append(keys, &key) - if _, ok := m[key]; ok { - // Shortest TTL wins. 
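-			// Identical RRs may still disagree on TTL; keeping the lowest
-			// means the surviving copy never outlives any of its duplicates.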
- if m[key].Header().Ttl > r.Header().Ttl { - m[key].Header().Ttl = r.Header().Ttl - } - continue - } - - m[key] = r - } - // If the length of the result map equals the amount of RRs we got, - // it means they were all different. We can then just return the original rrset. - if len(m) == len(rrs) { - return rrs - } - - j := 0 - for i, r := range rrs { - // If keys[i] lives in the map, we should copy and remove it. - if _, ok := m[*keys[i]]; ok { - delete(m, *keys[i]) - rrs[j] = r - j++ - } - - if len(m) == 0 { - break - } - } - - return rrs[:j] -} - -// normalizedString returns a normalized string from r. The TTL -// is removed and the domain name is lowercased. We go from this: -// DomainNameTTLCLASSTYPERDATA to: -// lowercasenameCLASSTYPE... -func normalizedString(r RR) string { - // A string Go DNS makes has: domainnameTTL... - b := []byte(r.String()) - - // find the first non-escaped tab, then another, so we capture where the TTL lives. - esc := false - ttlStart, ttlEnd := 0, 0 - for i := 0; i < len(b) && ttlEnd == 0; i++ { - switch { - case b[i] == '\\': - esc = !esc - case b[i] == '\t' && !esc: - if ttlStart == 0 { - ttlStart = i - continue - } - if ttlEnd == 0 { - ttlEnd = i - } - case b[i] >= 'A' && b[i] <= 'Z' && !esc: - b[i] += 32 - default: - esc = false - } - } - - // remove TTL. - copy(b[ttlStart:], b[ttlEnd:]) - cut := ttlEnd - ttlStart - return string(b[:len(b)-cut]) -} diff --git a/vendor/github.com/miekg/dns/scan.go b/vendor/github.com/miekg/dns/scan.go deleted file mode 100644 index d34597ba..00000000 --- a/vendor/github.com/miekg/dns/scan.go +++ /dev/null @@ -1,981 +0,0 @@ -package dns - -import ( - "io" - "log" - "os" - "strconv" - "strings" -) - -type debugging bool - -const debug debugging = false - -func (d debugging) Printf(format string, args ...interface{}) { - if d { - log.Printf(format, args...) - } -} - -const maxTok = 2048 // Largest token we can return. -const maxUint16 = 1<<16 - 1 - -// Tokinize a RFC 1035 zone file. The tokenizer will normalize it: -// * Add ownernames if they are left blank; -// * Suppress sequences of spaces; -// * Make each RR fit on one line (_NEWLINE is send as last) -// * Handle comments: ; -// * Handle braces - anywhere. -const ( - // Zonefile - zEOF = iota - zString - zBlank - zQuote - zNewline - zRrtpe - zOwner - zClass - zDirOrigin // $ORIGIN - zDirTtl // $TTL - zDirInclude // $INCLUDE - zDirGenerate // $GENERATE - - // Privatekey file - zValue - zKey - - zExpectOwnerDir // Ownername - zExpectOwnerBl // Whitespace after the ownername - zExpectAny // Expect rrtype, ttl or class - zExpectAnyNoClass // Expect rrtype or ttl - zExpectAnyNoClassBl // The whitespace after _EXPECT_ANY_NOCLASS - zExpectAnyNoTtl // Expect rrtype or class - zExpectAnyNoTtlBl // Whitespace after _EXPECT_ANY_NOTTL - zExpectRrtype // Expect rrtype - zExpectRrtypeBl // Whitespace BEFORE rrtype - zExpectRdata // The first element of the rdata - zExpectDirTtlBl // Space after directive $TTL - zExpectDirTtl // Directive $TTL - zExpectDirOriginBl // Space after directive $ORIGIN - zExpectDirOrigin // Directive $ORIGIN - zExpectDirIncludeBl // Space after directive $INCLUDE - zExpectDirInclude // Directive $INCLUDE - zExpectDirGenerate // Directive $GENERATE - zExpectDirGenerateBl // Space after directive $GENERATE -) - -// ParseError is a parsing error. It contains the parse error and the location in the io.Reader -// where the error occurred. 
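From the caller's side, the contract of Dedup and normalizedString above looks like this; a minimal sketch, with the expected output noted under the case-insensitive-name and lowest-TTL rules they implement:

```
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	a1, _ := dns.NewRR("example.org. 300 IN A 192.0.2.1")
	a2, _ := dns.NewRR("EXAMPLE.ORG. 60 IN A 192.0.2.1") // same RR: names compare case-insensitively
	out := dns.Dedup([]dns.RR{a1, a2}, nil)
	fmt.Println(len(out), out[0].Header().Ttl) // 1 60: one RR survives, lowest TTL wins
}
```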
-type ParseError struct { - file string - err string - lex lex -} - -func (e *ParseError) Error() (s string) { - if e.file != "" { - s = e.file + ": " - } - s += "dns: " + e.err + ": " + strconv.QuoteToASCII(e.lex.token) + " at line: " + - strconv.Itoa(e.lex.line) + ":" + strconv.Itoa(e.lex.column) - return -} - -type lex struct { - token string // text of the token - tokenUpper string // uppercase text of the token - length int // length of the token - err bool // when true, token text has lexer error - value uint8 // value: zString, _BLANK, etc. - line int // line in the file - column int // column in the file - torc uint16 // type or class as parsed in the lexer, we only need to look this up in the grammar - comment string // any comment text seen -} - -// Token holds the token that are returned when a zone file is parsed. -type Token struct { - // The scanned resource record when error is not nil. - RR - // When an error occurred, this has the error specifics. - Error *ParseError - // A potential comment positioned after the RR and on the same line. - Comment string -} - -// NewRR reads the RR contained in the string s. Only the first RR is -// returned. If s contains no RR, return nil with no error. The class -// defaults to IN and TTL defaults to 3600. The full zone file syntax -// like $TTL, $ORIGIN, etc. is supported. All fields of the returned -// RR are set, except RR.Header().Rdlength which is set to 0. -func NewRR(s string) (RR, error) { - if len(s) > 0 && s[len(s)-1] != '\n' { // We need a closing newline - return ReadRR(strings.NewReader(s+"\n"), "") - } - return ReadRR(strings.NewReader(s), "") -} - -// ReadRR reads the RR contained in q. -// See NewRR for more documentation. -func ReadRR(q io.Reader, filename string) (RR, error) { - r := <-parseZoneHelper(q, ".", filename, 1) - if r == nil { - return nil, nil - } - - if r.Error != nil { - return nil, r.Error - } - return r.RR, nil -} - -// ParseZone reads a RFC 1035 style zonefile from r. It returns *Tokens on the -// returned channel, which consist out the parsed RR, a potential comment or an error. -// If there is an error the RR is nil. The string file is only used -// in error reporting. The string origin is used as the initial origin, as -// if the file would start with: $ORIGIN origin . -// The directives $INCLUDE, $ORIGIN, $TTL and $GENERATE are supported. -// The channel t is closed by ParseZone when the end of r is reached. -// -// Basic usage pattern when reading from a string (z) containing the -// zone data: -// -// for x := range dns.ParseZone(strings.NewReader(z), "", "") { -// if x.Error != nil { -// // log.Println(x.Error) -// } else { -// // Do something with x.RR -// } -// } -// -// Comments specified after an RR (and on the same line!) are returned too: -// -// foo. IN A 10.0.0.1 ; this is a comment -// -// The text "; this is comment" is returned in Token.Comment. Comments inside the -// RR are discarded. Comments on a line by themselves are discarded too. 
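The NewRR defaults spelled out above, restated as a runnable sketch:

```
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	// Class and TTL are omitted here; per NewRR's contract above they
	// default to IN and 3600, and Header().Rdlength stays 0.
	rr, err := dns.NewRR("www.example.org. A 192.0.2.1")
	if err != nil {
		panic(err)
	}
	fmt.Println(rr.Header().Ttl, rr.Header().Class == dns.ClassINET) // 3600 true
}
```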
-func ParseZone(r io.Reader, origin, file string) chan *Token { - return parseZoneHelper(r, origin, file, 10000) -} - -func parseZoneHelper(r io.Reader, origin, file string, chansize int) chan *Token { - t := make(chan *Token, chansize) - go parseZone(r, origin, file, t, 0) - return t -} - -func parseZone(r io.Reader, origin, f string, t chan *Token, include int) { - defer func() { - if include == 0 { - close(t) - } - }() - s := scanInit(r) - c := make(chan lex) - // Start the lexer - go zlexer(s, c) - // 6 possible beginnings of a line, _ is a space - // 0. zRRTYPE -> all omitted until the rrtype - // 1. zOwner _ zRrtype -> class/ttl omitted - // 2. zOwner _ zString _ zRrtype -> class omitted - // 3. zOwner _ zString _ zClass _ zRrtype -> ttl/class - // 4. zOwner _ zClass _ zRrtype -> ttl omitted - // 5. zOwner _ zClass _ zString _ zRrtype -> class/ttl (reversed) - // After detecting these, we know the zRrtype so we can jump to functions - // handling the rdata for each of these types. - - if origin == "" { - origin = "." - } - origin = Fqdn(origin) - if _, ok := IsDomainName(origin); !ok { - t <- &Token{Error: &ParseError{f, "bad initial origin name", lex{}}} - return - } - - st := zExpectOwnerDir // initial state - var h RR_Header - var defttl uint32 = defaultTtl - var prevName string - for l := range c { - // Lexer spotted an error already - if l.err == true { - t <- &Token{Error: &ParseError{f, l.token, l}} - return - - } - switch st { - case zExpectOwnerDir: - // We can also expect a directive, like $TTL or $ORIGIN - h.Ttl = defttl - h.Class = ClassINET - switch l.value { - case zNewline: - st = zExpectOwnerDir - case zOwner: - h.Name = l.token - if l.token[0] == '@' { - h.Name = origin - prevName = h.Name - st = zExpectOwnerBl - break - } - if h.Name[l.length-1] != '.' { - h.Name = appendOrigin(h.Name, origin) - } - _, ok := IsDomainName(l.token) - if !ok { - t <- &Token{Error: &ParseError{f, "bad owner name", l}} - return - } - prevName = h.Name - st = zExpectOwnerBl - case zDirTtl: - st = zExpectDirTtlBl - case zDirOrigin: - st = zExpectDirOriginBl - case zDirInclude: - st = zExpectDirIncludeBl - case zDirGenerate: - st = zExpectDirGenerateBl - case zRrtpe: - h.Name = prevName - h.Rrtype = l.torc - st = zExpectRdata - case zClass: - h.Name = prevName - h.Class = l.torc - st = zExpectAnyNoClassBl - case zBlank: - // Discard, can happen when there is nothing on the - // line except the RR type - case zString: - ttl, ok := stringToTtl(l.token) - if !ok { - t <- &Token{Error: &ParseError{f, "not a TTL", l}} - return - } - h.Ttl = ttl - // Don't about the defttl, we should take the $TTL value - // defttl = ttl - st = zExpectAnyNoTtlBl - - default: - t <- &Token{Error: &ParseError{f, "syntax error at beginning", l}} - return - } - case zExpectDirIncludeBl: - if l.value != zBlank { - t <- &Token{Error: &ParseError{f, "no blank after $INCLUDE-directive", l}} - return - } - st = zExpectDirInclude - case zExpectDirInclude: - if l.value != zString { - t <- &Token{Error: &ParseError{f, "expecting $INCLUDE value, not this...", l}} - return - } - neworigin := origin // There may be optionally a new origin set after the filename, if not use current one - l := <-c - switch l.value { - case zBlank: - l := <-c - if l.value == zString { - if _, ok := IsDomainName(l.token); !ok || l.length == 0 || l.err { - t <- &Token{Error: &ParseError{f, "bad origin name", l}} - return - } - // a new origin is specified. - if l.token[l.length-1] != '.' { - if origin != "." { // Prevent .. 
endings - neworigin = l.token + "." + origin - } else { - neworigin = l.token + origin - } - } else { - neworigin = l.token - } - } - case zNewline, zEOF: - // Ok - default: - t <- &Token{Error: &ParseError{f, "garbage after $INCLUDE", l}} - return - } - // Start with the new file - r1, e1 := os.Open(l.token) - if e1 != nil { - t <- &Token{Error: &ParseError{f, "failed to open `" + l.token + "'", l}} - return - } - if include+1 > 7 { - t <- &Token{Error: &ParseError{f, "too deeply nested $INCLUDE", l}} - return - } - parseZone(r1, l.token, neworigin, t, include+1) - st = zExpectOwnerDir - case zExpectDirTtlBl: - if l.value != zBlank { - t <- &Token{Error: &ParseError{f, "no blank after $TTL-directive", l}} - return - } - st = zExpectDirTtl - case zExpectDirTtl: - if l.value != zString { - t <- &Token{Error: &ParseError{f, "expecting $TTL value, not this...", l}} - return - } - if e, _ := slurpRemainder(c, f); e != nil { - t <- &Token{Error: e} - return - } - ttl, ok := stringToTtl(l.token) - if !ok { - t <- &Token{Error: &ParseError{f, "expecting $TTL value, not this...", l}} - return - } - defttl = ttl - st = zExpectOwnerDir - case zExpectDirOriginBl: - if l.value != zBlank { - t <- &Token{Error: &ParseError{f, "no blank after $ORIGIN-directive", l}} - return - } - st = zExpectDirOrigin - case zExpectDirOrigin: - if l.value != zString { - t <- &Token{Error: &ParseError{f, "expecting $ORIGIN value, not this...", l}} - return - } - if e, _ := slurpRemainder(c, f); e != nil { - t <- &Token{Error: e} - } - if _, ok := IsDomainName(l.token); !ok { - t <- &Token{Error: &ParseError{f, "bad origin name", l}} - return - } - if l.token[l.length-1] != '.' { - if origin != "." { // Prevent .. endings - origin = l.token + "." + origin - } else { - origin = l.token + origin - } - } else { - origin = l.token - } - st = zExpectOwnerDir - case zExpectDirGenerateBl: - if l.value != zBlank { - t <- &Token{Error: &ParseError{f, "no blank after $GENERATE-directive", l}} - return - } - st = zExpectDirGenerate - case zExpectDirGenerate: - if l.value != zString { - t <- &Token{Error: &ParseError{f, "expecting $GENERATE value, not this...", l}} - return - } - if errMsg := generate(l, c, t, origin); errMsg != "" { - t <- &Token{Error: &ParseError{f, errMsg, l}} - return - } - st = zExpectOwnerDir - case zExpectOwnerBl: - if l.value != zBlank { - t <- &Token{Error: &ParseError{f, "no blank after owner", l}} - return - } - st = zExpectAny - case zExpectAny: - switch l.value { - case zRrtpe: - h.Rrtype = l.torc - st = zExpectRdata - case zClass: - h.Class = l.torc - st = zExpectAnyNoClassBl - case zString: - ttl, ok := stringToTtl(l.token) - if !ok { - t <- &Token{Error: &ParseError{f, "not a TTL", l}} - return - } - h.Ttl = ttl - // defttl = ttl // don't set the defttl here - st = zExpectAnyNoTtlBl - default: - t <- &Token{Error: &ParseError{f, "expecting RR type, TTL or class, not this...", l}} - return - } - case zExpectAnyNoClassBl: - if l.value != zBlank { - t <- &Token{Error: &ParseError{f, "no blank before class", l}} - return - } - st = zExpectAnyNoClass - case zExpectAnyNoTtlBl: - if l.value != zBlank { - t <- &Token{Error: &ParseError{f, "no blank before TTL", l}} - return - } - st = zExpectAnyNoTtl - case zExpectAnyNoTtl: - switch l.value { - case zClass: - h.Class = l.torc - st = zExpectRrtypeBl - case zRrtpe: - h.Rrtype = l.torc - st = zExpectRdata - default: - t <- &Token{Error: &ParseError{f, "expecting RR type or class, not this...", l}} - return - } - case zExpectAnyNoClass: - switch l.value { - case 
zString: - ttl, ok := stringToTtl(l.token) - if !ok { - t <- &Token{Error: &ParseError{f, "not a TTL", l}} - return - } - h.Ttl = ttl - // defttl = ttl // don't set the def ttl anymore - st = zExpectRrtypeBl - case zRrtpe: - h.Rrtype = l.torc - st = zExpectRdata - default: - t <- &Token{Error: &ParseError{f, "expecting RR type or TTL, not this...", l}} - return - } - case zExpectRrtypeBl: - if l.value != zBlank { - t <- &Token{Error: &ParseError{f, "no blank before RR type", l}} - return - } - st = zExpectRrtype - case zExpectRrtype: - if l.value != zRrtpe { - t <- &Token{Error: &ParseError{f, "unknown RR type", l}} - return - } - h.Rrtype = l.torc - st = zExpectRdata - case zExpectRdata: - r, e, c1 := setRR(h, c, origin, f) - if e != nil { - // If e.lex is nil than we have encounter a unknown RR type - // in that case we substitute our current lex token - if e.lex.token == "" && e.lex.value == 0 { - e.lex = l // Uh, dirty - } - t <- &Token{Error: e} - return - } - t <- &Token{RR: r, Comment: c1} - st = zExpectOwnerDir - } - } - // If we get here, we and the h.Rrtype is still zero, we haven't parsed anything, this - // is not an error, because an empty zone file is still a zone file. -} - -// zlexer scans the sourcefile and returns tokens on the channel c. -func zlexer(s *scan, c chan lex) { - var l lex - str := make([]byte, maxTok) // Should be enough for any token - stri := 0 // Offset in str (0 means empty) - com := make([]byte, maxTok) // Hold comment text - comi := 0 - quote := false - escape := false - space := false - commt := false - rrtype := false - owner := true - brace := 0 - x, err := s.tokenText() - defer close(c) - for err == nil { - l.column = s.position.Column - l.line = s.position.Line - if stri >= maxTok { - l.token = "token length insufficient for parsing" - l.err = true - debug.Printf("[%+v]", l.token) - c <- l - return - } - if comi >= maxTok { - l.token = "comment length insufficient for parsing" - l.err = true - debug.Printf("[%+v]", l.token) - c <- l - return - } - - switch x { - case ' ', '\t': - if escape { - escape = false - str[stri] = x - stri++ - break - } - if quote { - // Inside quotes this is legal - str[stri] = x - stri++ - break - } - if commt { - com[comi] = x - comi++ - break - } - if stri == 0 { - // Space directly in the beginning, handled in the grammar - } else if owner { - // If we have a string and its the first, make it an owner - l.value = zOwner - l.token = string(str[:stri]) - l.tokenUpper = strings.ToUpper(l.token) - l.length = stri - // escape $... 
start with a \ not a $, so this will work - switch l.tokenUpper { - case "$TTL": - l.value = zDirTtl - case "$ORIGIN": - l.value = zDirOrigin - case "$INCLUDE": - l.value = zDirInclude - case "$GENERATE": - l.value = zDirGenerate - } - debug.Printf("[7 %+v]", l.token) - c <- l - } else { - l.value = zString - l.token = string(str[:stri]) - l.tokenUpper = strings.ToUpper(l.token) - l.length = stri - if !rrtype { - if t, ok := StringToType[l.tokenUpper]; ok { - l.value = zRrtpe - l.torc = t - rrtype = true - } else { - if strings.HasPrefix(l.tokenUpper, "TYPE") { - t, ok := typeToInt(l.token) - if !ok { - l.token = "unknown RR type" - l.err = true - c <- l - return - } - l.value = zRrtpe - l.torc = t - } - } - if t, ok := StringToClass[l.tokenUpper]; ok { - l.value = zClass - l.torc = t - } else { - if strings.HasPrefix(l.tokenUpper, "CLASS") { - t, ok := classToInt(l.token) - if !ok { - l.token = "unknown class" - l.err = true - c <- l - return - } - l.value = zClass - l.torc = t - } - } - } - debug.Printf("[6 %+v]", l.token) - c <- l - } - stri = 0 - // I reverse space stuff here - if !space && !commt { - l.value = zBlank - l.token = " " - l.length = 1 - debug.Printf("[5 %+v]", l.token) - c <- l - } - owner = false - space = true - case ';': - if escape { - escape = false - str[stri] = x - stri++ - break - } - if quote { - // Inside quotes this is legal - str[stri] = x - stri++ - break - } - if stri > 0 { - l.value = zString - l.token = string(str[:stri]) - l.tokenUpper = strings.ToUpper(l.token) - l.length = stri - debug.Printf("[4 %+v]", l.token) - c <- l - stri = 0 - } - commt = true - com[comi] = ';' - comi++ - case '\r': - escape = false - if quote { - str[stri] = x - stri++ - break - } - // discard if outside of quotes - case '\n': - escape = false - // Escaped newline - if quote { - str[stri] = x - stri++ - break - } - // inside quotes this is legal - if commt { - // Reset a comment - commt = false - rrtype = false - stri = 0 - // If not in a brace this ends the comment AND the RR - if brace == 0 { - owner = true - owner = true - l.value = zNewline - l.token = "\n" - l.tokenUpper = l.token - l.length = 1 - l.comment = string(com[:comi]) - debug.Printf("[3 %+v %+v]", l.token, l.comment) - c <- l - l.comment = "" - comi = 0 - break - } - com[comi] = ' ' // convert newline to space - comi++ - break - } - - if brace == 0 { - // If there is previous text, we should output it here - if stri != 0 { - l.value = zString - l.token = string(str[:stri]) - l.tokenUpper = strings.ToUpper(l.token) - - l.length = stri - if !rrtype { - if t, ok := StringToType[l.tokenUpper]; ok { - l.value = zRrtpe - l.torc = t - rrtype = true - } - } - debug.Printf("[2 %+v]", l.token) - c <- l - } - l.value = zNewline - l.token = "\n" - l.tokenUpper = l.token - l.length = 1 - debug.Printf("[1 %+v]", l.token) - c <- l - stri = 0 - commt = false - rrtype = false - owner = true - comi = 0 - } - case '\\': - // comments do not get escaped chars, everything is copied - if commt { - com[comi] = x - comi++ - break - } - // something already escaped must be in string - if escape { - str[stri] = x - stri++ - escape = false - break - } - // something escaped outside of string gets added to string - str[stri] = x - stri++ - escape = true - case '"': - if commt { - com[comi] = x - comi++ - break - } - if escape { - str[stri] = x - stri++ - escape = false - break - } - space = false - // send previous gathered text and the quote - if stri != 0 { - l.value = zString - l.token = string(str[:stri]) - l.tokenUpper = 
strings.ToUpper(l.token) - l.length = stri - - debug.Printf("[%+v]", l.token) - c <- l - stri = 0 - } - - // send quote itself as separate token - l.value = zQuote - l.token = "\"" - l.tokenUpper = l.token - l.length = 1 - c <- l - quote = !quote - case '(', ')': - if commt { - com[comi] = x - comi++ - break - } - if escape { - str[stri] = x - stri++ - escape = false - break - } - if quote { - str[stri] = x - stri++ - break - } - switch x { - case ')': - brace-- - if brace < 0 { - l.token = "extra closing brace" - l.tokenUpper = l.token - l.err = true - debug.Printf("[%+v]", l.token) - c <- l - return - } - case '(': - brace++ - } - default: - escape = false - if commt { - com[comi] = x - comi++ - break - } - str[stri] = x - stri++ - space = false - } - x, err = s.tokenText() - } - if stri > 0 { - // Send remainder - l.token = string(str[:stri]) - l.tokenUpper = strings.ToUpper(l.token) - l.length = stri - l.value = zString - debug.Printf("[%+v]", l.token) - c <- l - } -} - -// Extract the class number from CLASSxx -func classToInt(token string) (uint16, bool) { - offset := 5 - if len(token) < offset+1 { - return 0, false - } - class, ok := strconv.Atoi(token[offset:]) - if ok != nil || class > maxUint16 { - return 0, false - } - return uint16(class), true -} - -// Extract the rr number from TYPExxx -func typeToInt(token string) (uint16, bool) { - offset := 4 - if len(token) < offset+1 { - return 0, false - } - typ, ok := strconv.Atoi(token[offset:]) - if ok != nil || typ > maxUint16 { - return 0, false - } - return uint16(typ), true -} - -// Parse things like 2w, 2m, etc, Return the time in seconds. -func stringToTtl(token string) (uint32, bool) { - s := uint32(0) - i := uint32(0) - for _, c := range token { - switch c { - case 's', 'S': - s += i - i = 0 - case 'm', 'M': - s += i * 60 - i = 0 - case 'h', 'H': - s += i * 60 * 60 - i = 0 - case 'd', 'D': - s += i * 60 * 60 * 24 - i = 0 - case 'w', 'W': - s += i * 60 * 60 * 24 * 7 - i = 0 - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - i *= 10 - i += uint32(c) - '0' - default: - return 0, false - } - } - return s + i, true -} - -// Parse LOC records' [.][mM] into a -// mantissa exponent format. Token should contain the entire -// string (i.e. no spaces allowed) -func stringToCm(token string) (e, m uint8, ok bool) { - if token[len(token)-1] == 'M' || token[len(token)-1] == 'm' { - token = token[0 : len(token)-1] - } - s := strings.SplitN(token, ".", 2) - var meters, cmeters, val int - var err error - switch len(s) { - case 2: - if cmeters, err = strconv.Atoi(s[1]); err != nil { - return - } - fallthrough - case 1: - if meters, err = strconv.Atoi(s[0]); err != nil { - return - } - case 0: - // huh? - return 0, 0, false - } - ok = true - if meters > 0 { - e = 2 - val = meters - } else { - e = 0 - val = cmeters - } - for val > 10 { - e++ - val /= 10 - } - if e > 9 { - ok = false - } - m = uint8(val) - return -} - -func appendOrigin(name, origin string) string { - if origin == "." { - return name + origin - } - return name + "." 
+ origin -} - -// LOC record helper function -func locCheckNorth(token string, latitude uint32) (uint32, bool) { - switch token { - case "n", "N": - return LOC_EQUATOR + latitude, true - case "s", "S": - return LOC_EQUATOR - latitude, true - } - return latitude, false -} - -// LOC record helper function -func locCheckEast(token string, longitude uint32) (uint32, bool) { - switch token { - case "e", "E": - return LOC_EQUATOR + longitude, true - case "w", "W": - return LOC_EQUATOR - longitude, true - } - return longitude, false -} - -// "Eat" the rest of the "line". Return potential comments -func slurpRemainder(c chan lex, f string) (*ParseError, string) { - l := <-c - com := "" - switch l.value { - case zBlank: - l = <-c - com = l.comment - if l.value != zNewline && l.value != zEOF { - return &ParseError{f, "garbage after rdata", l}, "" - } - case zNewline: - com = l.comment - case zEOF: - default: - return &ParseError{f, "garbage after rdata", l}, "" - } - return nil, com -} - -// Parse a 64 bit-like ipv6 address: "0014:4fff:ff20:ee64" -// Used for NID and L64 record. -func stringToNodeID(l lex) (uint64, *ParseError) { - if len(l.token) < 19 { - return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l} - } - // There must be three colons at fixes postitions, if not its a parse error - if l.token[4] != ':' && l.token[9] != ':' && l.token[14] != ':' { - return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l} - } - s := l.token[0:4] + l.token[5:9] + l.token[10:14] + l.token[15:19] - u, err := strconv.ParseUint(s, 16, 64) - if err != nil { - return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l} - } - return u, nil -} diff --git a/vendor/github.com/miekg/dns/scan_rr.go b/vendor/github.com/miekg/dns/scan_rr.go deleted file mode 100644 index 675fc80d..00000000 --- a/vendor/github.com/miekg/dns/scan_rr.go +++ /dev/null @@ -1,2179 +0,0 @@ -package dns - -import ( - "encoding/base64" - "net" - "strconv" - "strings" -) - -type parserFunc struct { - // Func defines the function that parses the tokens and returns the RR - // or an error. The last string contains any comments in the line as - // they returned by the lexer as well. - Func func(h RR_Header, c chan lex, origin string, file string) (RR, *ParseError, string) - // Signals if the RR ending is of variable length, like TXT or records - // that have Hexadecimal or Base64 as their last element in the Rdata. Records - // that have a fixed ending or for instance A, AAAA, SOA and etc. - Variable bool -} - -// Parse the rdata of each rrtype. -// All data from the channel c is either zString or zBlank. -// After the rdata there may come a zBlank and then a zNewline -// or immediately a zNewline. If this is not the case we flag -// an *ParseError: garbage after rdata. 
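setRR, below, dispatches through typeToparserFunc and falls back to the RFC 3597 unknown-record syntax for anything unregistered. A caller-side sketch, using TYPE65280 from the private-use range:

```
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	// \# <length> <hexdata> is the RFC 3597 presentation format for
	// types the package has no dedicated parser for.
	rr, err := dns.NewRR(`example.org. 3600 IN TYPE65280 \# 4 C0000201`)
	fmt.Println(rr, err)
}
```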
-func setRR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - parserfunc, ok := typeToparserFunc[h.Rrtype] - if ok { - r, e, cm := parserfunc.Func(h, c, o, f) - if parserfunc.Variable { - return r, e, cm - } - if e != nil { - return nil, e, "" - } - e, cm = slurpRemainder(c, f) - if e != nil { - return nil, e, "" - } - return r, nil, cm - } - // RFC3957 RR (Unknown RR handling) - return setRFC3597(h, c, o, f) -} - -// A remainder of the rdata with embedded spaces, return the parsed string (sans the spaces) -// or an error -func endingToString(c chan lex, errstr, f string) (string, *ParseError, string) { - s := "" - l := <-c // zString - for l.value != zNewline && l.value != zEOF { - if l.err { - return s, &ParseError{f, errstr, l}, "" - } - switch l.value { - case zString: - s += l.token - case zBlank: // Ok - default: - return "", &ParseError{f, errstr, l}, "" - } - l = <-c - } - return s, nil, l.comment -} - -// A remainder of the rdata with embedded spaces, return the parsed string slice (sans the spaces) -// or an error -func endingToTxtSlice(c chan lex, errstr, f string) ([]string, *ParseError, string) { - // Get the remaining data until we see a zNewline - quote := false - l := <-c - var s []string - if l.err { - return s, &ParseError{f, errstr, l}, "" - } - switch l.value == zQuote { - case true: // A number of quoted string - s = make([]string, 0) - empty := true - for l.value != zNewline && l.value != zEOF { - if l.err { - return nil, &ParseError{f, errstr, l}, "" - } - switch l.value { - case zString: - empty = false - if len(l.token) > 255 { - // split up tokens that are larger than 255 into 255-chunks - sx := []string{} - p, i := 0, 255 - for { - if i <= len(l.token) { - sx = append(sx, l.token[p:i]) - } else { - sx = append(sx, l.token[p:]) - break - - } - p, i = p+255, i+255 - } - s = append(s, sx...) - break - } - - s = append(s, l.token) - case zBlank: - if quote { - // zBlank can only be seen in between txt parts. - return nil, &ParseError{f, errstr, l}, "" - } - case zQuote: - if empty && quote { - s = append(s, "") - } - quote = !quote - empty = true - default: - return nil, &ParseError{f, errstr, l}, "" - } - l = <-c - } - if quote { - return nil, &ParseError{f, errstr, l}, "" - } - case false: // Unquoted text record - s = make([]string, 1) - for l.value != zNewline && l.value != zEOF { - if l.err { - return s, &ParseError{f, errstr, l}, "" - } - s[0] += l.token - l = <-c - } - } - return s, nil, l.comment -} - -func setA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(A) - rr.Hdr = h - - l := <-c - if l.length == 0 { // Dynamic updates. - return rr, nil, "" - } - rr.A = net.ParseIP(l.token) - if rr.A == nil || l.err { - return nil, &ParseError{f, "bad A A", l}, "" - } - return rr, nil, "" -} - -func setAAAA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(AAAA) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - rr.AAAA = net.ParseIP(l.token) - if rr.AAAA == nil || l.err { - return nil, &ParseError{f, "bad AAAA AAAA", l}, "" - } - return rr, nil, "" -} - -func setNS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(NS) - rr.Hdr = h - - l := <-c - rr.Ns = l.token - if l.length == 0 { - return rr, nil, "" - } - if l.token == "@" { - rr.Ns = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad NS Ns", l}, "" - } - if rr.Ns[l.length-1] != '.' 
{ - rr.Ns = appendOrigin(rr.Ns, o) - } - return rr, nil, "" -} - -func setPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(PTR) - rr.Hdr = h - - l := <-c - rr.Ptr = l.token - if l.length == 0 { // dynamic update rr. - return rr, nil, "" - } - if l.token == "@" { - rr.Ptr = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad PTR Ptr", l}, "" - } - if rr.Ptr[l.length-1] != '.' { - rr.Ptr = appendOrigin(rr.Ptr, o) - } - return rr, nil, "" -} - -func setNSAPPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(NSAPPTR) - rr.Hdr = h - - l := <-c - rr.Ptr = l.token - if l.length == 0 { - return rr, nil, "" - } - if l.token == "@" { - rr.Ptr = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad NSAP-PTR Ptr", l}, "" - } - if rr.Ptr[l.length-1] != '.' { - rr.Ptr = appendOrigin(rr.Ptr, o) - } - return rr, nil, "" -} - -func setRP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(RP) - rr.Hdr = h - - l := <-c - rr.Mbox = l.token - if l.length == 0 { - return rr, nil, "" - } - if l.token == "@" { - rr.Mbox = o - } else { - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad RP Mbox", l}, "" - } - if rr.Mbox[l.length-1] != '.' { - rr.Mbox = appendOrigin(rr.Mbox, o) - } - } - <-c // zBlank - l = <-c - rr.Txt = l.token - if l.token == "@" { - rr.Txt = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad RP Txt", l}, "" - } - if rr.Txt[l.length-1] != '.' { - rr.Txt = appendOrigin(rr.Txt, o) - } - return rr, nil, "" -} - -func setMR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(MR) - rr.Hdr = h - - l := <-c - rr.Mr = l.token - if l.length == 0 { - return rr, nil, "" - } - if l.token == "@" { - rr.Mr = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad MR Mr", l}, "" - } - if rr.Mr[l.length-1] != '.' { - rr.Mr = appendOrigin(rr.Mr, o) - } - return rr, nil, "" -} - -func setMB(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(MB) - rr.Hdr = h - - l := <-c - rr.Mb = l.token - if l.length == 0 { - return rr, nil, "" - } - if l.token == "@" { - rr.Mb = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad MB Mb", l}, "" - } - if rr.Mb[l.length-1] != '.' { - rr.Mb = appendOrigin(rr.Mb, o) - } - return rr, nil, "" -} - -func setMG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(MG) - rr.Hdr = h - - l := <-c - rr.Mg = l.token - if l.length == 0 { - return rr, nil, "" - } - if l.token == "@" { - rr.Mg = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad MG Mg", l}, "" - } - if rr.Mg[l.length-1] != '.' { - rr.Mg = appendOrigin(rr.Mg, o) - } - return rr, nil, "" -} - -func setHINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(HINFO) - rr.Hdr = h - - chunks, e, c1 := endingToTxtSlice(c, "bad HINFO Fields", f) - if e != nil { - return nil, e, c1 - } - - if ln := len(chunks); ln == 0 { - return rr, nil, "" - } else if ln == 1 { - // Can we split it? 
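// Aside (sketch): the domain-name fields in the setters above and below
// all share one convention, implemented via appendOrigin: with origin
// "example.org.", the zone line
//
//	www IN NS ns1
//
// yields owner "www.example.org." and Ns "ns1.example.org.", while "@"
// in either position stands for the origin itself.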
- if out := strings.Fields(chunks[0]); len(out) > 1 { - chunks = out - } else { - chunks = append(chunks, "") - } - } - - rr.Cpu = chunks[0] - rr.Os = strings.Join(chunks[1:], " ") - - return rr, nil, "" -} - -func setMINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(MINFO) - rr.Hdr = h - - l := <-c - rr.Rmail = l.token - if l.length == 0 { - return rr, nil, "" - } - if l.token == "@" { - rr.Rmail = o - } else { - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad MINFO Rmail", l}, "" - } - if rr.Rmail[l.length-1] != '.' { - rr.Rmail = appendOrigin(rr.Rmail, o) - } - } - <-c // zBlank - l = <-c - rr.Email = l.token - if l.token == "@" { - rr.Email = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad MINFO Email", l}, "" - } - if rr.Email[l.length-1] != '.' { - rr.Email = appendOrigin(rr.Email, o) - } - return rr, nil, "" -} - -func setMF(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(MF) - rr.Hdr = h - - l := <-c - rr.Mf = l.token - if l.length == 0 { - return rr, nil, "" - } - if l.token == "@" { - rr.Mf = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad MF Mf", l}, "" - } - if rr.Mf[l.length-1] != '.' { - rr.Mf = appendOrigin(rr.Mf, o) - } - return rr, nil, "" -} - -func setMD(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(MD) - rr.Hdr = h - - l := <-c - rr.Md = l.token - if l.length == 0 { - return rr, nil, "" - } - if l.token == "@" { - rr.Md = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad MD Md", l}, "" - } - if rr.Md[l.length-1] != '.' { - rr.Md = appendOrigin(rr.Md, o) - } - return rr, nil, "" -} - -func setMX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(MX) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad MX Pref", l}, "" - } - rr.Preference = uint16(i) - <-c // zBlank - l = <-c // zString - rr.Mx = l.token - if l.token == "@" { - rr.Mx = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad MX Mx", l}, "" - } - if rr.Mx[l.length-1] != '.' { - rr.Mx = appendOrigin(rr.Mx, o) - } - return rr, nil, "" -} - -func setRT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(RT) - rr.Hdr = h - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.Atoi(l.token) - if e != nil { - return nil, &ParseError{f, "bad RT Preference", l}, "" - } - rr.Preference = uint16(i) - <-c // zBlank - l = <-c // zString - rr.Host = l.token - if l.token == "@" { - rr.Host = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad RT Host", l}, "" - } - if rr.Host[l.length-1] != '.' 
{ - rr.Host = appendOrigin(rr.Host, o) - } - return rr, nil, "" -} - -func setAFSDB(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(AFSDB) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad AFSDB Subtype", l}, "" - } - rr.Subtype = uint16(i) - <-c // zBlank - l = <-c // zString - rr.Hostname = l.token - if l.token == "@" { - rr.Hostname = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad AFSDB Hostname", l}, "" - } - if rr.Hostname[l.length-1] != '.' { - rr.Hostname = appendOrigin(rr.Hostname, o) - } - return rr, nil, "" -} - -func setX25(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(X25) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - if l.err { - return nil, &ParseError{f, "bad X25 PSDNAddress", l}, "" - } - rr.PSDNAddress = l.token - return rr, nil, "" -} - -func setKX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(KX) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad KX Pref", l}, "" - } - rr.Preference = uint16(i) - <-c // zBlank - l = <-c // zString - rr.Exchanger = l.token - if l.token == "@" { - rr.Exchanger = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad KX Exchanger", l}, "" - } - if rr.Exchanger[l.length-1] != '.' { - rr.Exchanger = appendOrigin(rr.Exchanger, o) - } - return rr, nil, "" -} - -func setCNAME(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(CNAME) - rr.Hdr = h - - l := <-c - rr.Target = l.token - if l.length == 0 { - return rr, nil, "" - } - if l.token == "@" { - rr.Target = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad CNAME Target", l}, "" - } - if rr.Target[l.length-1] != '.' { - rr.Target = appendOrigin(rr.Target, o) - } - return rr, nil, "" -} - -func setDNAME(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(DNAME) - rr.Hdr = h - - l := <-c - rr.Target = l.token - if l.length == 0 { - return rr, nil, "" - } - if l.token == "@" { - rr.Target = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad CNAME Target", l}, "" - } - if rr.Target[l.length-1] != '.' { - rr.Target = appendOrigin(rr.Target, o) - } - return rr, nil, "" -} - -func setSOA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(SOA) - rr.Hdr = h - - l := <-c - rr.Ns = l.token - if l.length == 0 { - return rr, nil, "" - } - <-c // zBlank - if l.token == "@" { - rr.Ns = o - } else { - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad SOA Ns", l}, "" - } - if rr.Ns[l.length-1] != '.' { - rr.Ns = appendOrigin(rr.Ns, o) - } - } - - l = <-c - rr.Mbox = l.token - if l.token == "@" { - rr.Mbox = o - } else { - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad SOA Mbox", l}, "" - } - if rr.Mbox[l.length-1] != '.' 
{ - rr.Mbox = appendOrigin(rr.Mbox, o) - } - } - <-c // zBlank - - var ( - v uint32 - ok bool - ) - for i := 0; i < 5; i++ { - l = <-c - if l.err { - return nil, &ParseError{f, "bad SOA zone parameter", l}, "" - } - if j, e := strconv.Atoi(l.token); e != nil { - if i == 0 { - // Serial should be a number - return nil, &ParseError{f, "bad SOA zone parameter", l}, "" - } - if v, ok = stringToTtl(l.token); !ok { - return nil, &ParseError{f, "bad SOA zone parameter", l}, "" - - } - } else { - v = uint32(j) - } - switch i { - case 0: - rr.Serial = v - <-c // zBlank - case 1: - rr.Refresh = v - <-c // zBlank - case 2: - rr.Retry = v - <-c // zBlank - case 3: - rr.Expire = v - <-c // zBlank - case 4: - rr.Minttl = v - } - } - return rr, nil, "" -} - -func setSRV(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(SRV) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad SRV Priority", l}, "" - } - rr.Priority = uint16(i) - <-c // zBlank - l = <-c // zString - i, e = strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad SRV Weight", l}, "" - } - rr.Weight = uint16(i) - <-c // zBlank - l = <-c // zString - i, e = strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad SRV Port", l}, "" - } - rr.Port = uint16(i) - <-c // zBlank - l = <-c // zString - rr.Target = l.token - if l.token == "@" { - rr.Target = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad SRV Target", l}, "" - } - if rr.Target[l.length-1] != '.' { - rr.Target = appendOrigin(rr.Target, o) - } - return rr, nil, "" -} - -func setNAPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(NAPTR) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad NAPTR Order", l}, "" - } - rr.Order = uint16(i) - <-c // zBlank - l = <-c // zString - i, e = strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad NAPTR Preference", l}, "" - } - rr.Preference = uint16(i) - // Flags - <-c // zBlank - l = <-c // _QUOTE - if l.value != zQuote { - return nil, &ParseError{f, "bad NAPTR Flags", l}, "" - } - l = <-c // Either String or Quote - if l.value == zString { - rr.Flags = l.token - l = <-c // _QUOTE - if l.value != zQuote { - return nil, &ParseError{f, "bad NAPTR Flags", l}, "" - } - } else if l.value == zQuote { - rr.Flags = "" - } else { - return nil, &ParseError{f, "bad NAPTR Flags", l}, "" - } - - // Service - <-c // zBlank - l = <-c // _QUOTE - if l.value != zQuote { - return nil, &ParseError{f, "bad NAPTR Service", l}, "" - } - l = <-c // Either String or Quote - if l.value == zString { - rr.Service = l.token - l = <-c // _QUOTE - if l.value != zQuote { - return nil, &ParseError{f, "bad NAPTR Service", l}, "" - } - } else if l.value == zQuote { - rr.Service = "" - } else { - return nil, &ParseError{f, "bad NAPTR Service", l}, "" - } - - // Regexp - <-c // zBlank - l = <-c // _QUOTE - if l.value != zQuote { - return nil, &ParseError{f, "bad NAPTR Regexp", l}, "" - } - l = <-c // Either String or Quote - if l.value == zString { - rr.Regexp = l.token - l = <-c // _QUOTE - if l.value != zQuote { - return nil, &ParseError{f, "bad NAPTR Regexp", l}, "" - } - } else if l.value == zQuote { - rr.Regexp = "" - } else { - return nil, 
&ParseError{f, "bad NAPTR Regexp", l}, "" - } - // After quote no space?? - <-c // zBlank - l = <-c // zString - rr.Replacement = l.token - if l.token == "@" { - rr.Replacement = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad NAPTR Replacement", l}, "" - } - if rr.Replacement[l.length-1] != '.' { - rr.Replacement = appendOrigin(rr.Replacement, o) - } - return rr, nil, "" -} - -func setTALINK(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(TALINK) - rr.Hdr = h - - l := <-c - rr.PreviousName = l.token - if l.length == 0 { - return rr, nil, "" - } - if l.token == "@" { - rr.PreviousName = o - } else { - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad TALINK PreviousName", l}, "" - } - if rr.PreviousName[l.length-1] != '.' { - rr.PreviousName = appendOrigin(rr.PreviousName, o) - } - } - <-c // zBlank - l = <-c - rr.NextName = l.token - if l.token == "@" { - rr.NextName = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad TALINK NextName", l}, "" - } - if rr.NextName[l.length-1] != '.' { - rr.NextName = appendOrigin(rr.NextName, o) - } - return rr, nil, "" -} - -func setLOC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(LOC) - rr.Hdr = h - // Non zero defaults for LOC record, see RFC 1876, Section 3. - rr.HorizPre = 165 // 10000 - rr.VertPre = 162 // 10 - rr.Size = 18 // 1 - ok := false - // North - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad LOC Latitude", l}, "" - } - rr.Latitude = 1000 * 60 * 60 * uint32(i) - - <-c // zBlank - // Either number, 'N' or 'S' - l = <-c - if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok { - goto East - } - i, e = strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad LOC Latitude minutes", l}, "" - } - rr.Latitude += 1000 * 60 * uint32(i) - - <-c // zBlank - l = <-c - if i, e := strconv.ParseFloat(l.token, 32); e != nil || l.err { - return nil, &ParseError{f, "bad LOC Latitude seconds", l}, "" - } else { - rr.Latitude += uint32(1000 * i) - } - <-c // zBlank - // Either number, 'N' or 'S' - l = <-c - if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok { - goto East - } - // If still alive, flag an error - return nil, &ParseError{f, "bad LOC Latitude North/South", l}, "" - -East: - // East - <-c // zBlank - l = <-c - if i, e := strconv.Atoi(l.token); e != nil || l.err { - return nil, &ParseError{f, "bad LOC Longitude", l}, "" - } else { - rr.Longitude = 1000 * 60 * 60 * uint32(i) - } - <-c // zBlank - // Either number, 'E' or 'W' - l = <-c - if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok { - goto Altitude - } - if i, e := strconv.Atoi(l.token); e != nil || l.err { - return nil, &ParseError{f, "bad LOC Longitude minutes", l}, "" - } else { - rr.Longitude += 1000 * 60 * uint32(i) - } - <-c // zBlank - l = <-c - if i, e := strconv.ParseFloat(l.token, 32); e != nil || l.err { - return nil, &ParseError{f, "bad LOC Longitude seconds", l}, "" - } else { - rr.Longitude += uint32(1000 * i) - } - <-c // zBlank - // Either number, 'E' or 'W' - l = <-c - if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok { - goto Altitude - } - // If still alive, flag an error - return nil, &ParseError{f, "bad LOC Longitude East/West", l}, "" - 
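// Aside (sketch): end-to-end, the RFC 1876 presentation format handled
// here parses as
//
//	rr, _ := dns.NewRR("example.org. IN LOC 42 21 54 N 71 06 18 W -24m 30m")
//
// where -24m is the altitude and the optional trailing 30m sets Size;
// HorizPre and VertPre keep the defaults assigned at the top of setLOC.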
-Altitude: - <-c // zBlank - l = <-c - if l.length == 0 || l.err { - return nil, &ParseError{f, "bad LOC Altitude", l}, "" - } - if l.token[len(l.token)-1] == 'M' || l.token[len(l.token)-1] == 'm' { - l.token = l.token[0 : len(l.token)-1] - } - if i, e := strconv.ParseFloat(l.token, 32); e != nil { - return nil, &ParseError{f, "bad LOC Altitude", l}, "" - } else { - rr.Altitude = uint32(i*100.0 + 10000000.0 + 0.5) - } - - // And now optionally the other values - l = <-c - count := 0 - for l.value != zNewline && l.value != zEOF { - switch l.value { - case zString: - switch count { - case 0: // Size - e, m, ok := stringToCm(l.token) - if !ok { - return nil, &ParseError{f, "bad LOC Size", l}, "" - } - rr.Size = (e & 0x0f) | (m << 4 & 0xf0) - case 1: // HorizPre - e, m, ok := stringToCm(l.token) - if !ok { - return nil, &ParseError{f, "bad LOC HorizPre", l}, "" - } - rr.HorizPre = (e & 0x0f) | (m << 4 & 0xf0) - case 2: // VertPre - e, m, ok := stringToCm(l.token) - if !ok { - return nil, &ParseError{f, "bad LOC VertPre", l}, "" - } - rr.VertPre = (e & 0x0f) | (m << 4 & 0xf0) - } - count++ - case zBlank: - // Ok - default: - return nil, &ParseError{f, "bad LOC Size, HorizPre or VertPre", l}, "" - } - l = <-c - } - return rr, nil, "" -} - -func setHIP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(HIP) - rr.Hdr = h - - // HitLength is not represented - l := <-c - if l.length == 0 { - return rr, nil, l.comment - } - i, e := strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad HIP PublicKeyAlgorithm", l}, "" - } - rr.PublicKeyAlgorithm = uint8(i) - <-c // zBlank - l = <-c // zString - if l.length == 0 || l.err { - return nil, &ParseError{f, "bad HIP Hit", l}, "" - } - rr.Hit = l.token // This can not contain spaces, see RFC 5205 Section 6. - rr.HitLength = uint8(len(rr.Hit)) / 2 - - <-c // zBlank - l = <-c // zString - if l.length == 0 || l.err { - return nil, &ParseError{f, "bad HIP PublicKey", l}, "" - } - rr.PublicKey = l.token // This cannot contain spaces - rr.PublicKeyLength = uint16(base64.StdEncoding.DecodedLen(len(rr.PublicKey))) - - // RendezvousServers (if any) - l = <-c - var xs []string - for l.value != zNewline && l.value != zEOF { - switch l.value { - case zString: - if l.token == "@" { - xs = append(xs, o) - l = <-c - continue - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad HIP RendezvousServers", l}, "" - } - if l.token[l.length-1] != '.' 
{ - l.token = appendOrigin(l.token, o) - } - xs = append(xs, l.token) - case zBlank: - // Ok - default: - return nil, &ParseError{f, "bad HIP RendezvousServers", l}, "" - } - l = <-c - } - rr.RendezvousServers = xs - return rr, nil, l.comment -} - -func setCERT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(CERT) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, l.comment - } - if v, ok := StringToCertType[l.token]; ok { - rr.Type = v - } else if i, e := strconv.Atoi(l.token); e != nil { - return nil, &ParseError{f, "bad CERT Type", l}, "" - } else { - rr.Type = uint16(i) - } - <-c // zBlank - l = <-c // zString - i, e := strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad CERT KeyTag", l}, "" - } - rr.KeyTag = uint16(i) - <-c // zBlank - l = <-c // zString - if v, ok := StringToAlgorithm[l.token]; ok { - rr.Algorithm = v - } else if i, e := strconv.Atoi(l.token); e != nil { - return nil, &ParseError{f, "bad CERT Algorithm", l}, "" - } else { - rr.Algorithm = uint8(i) - } - s, e1, c1 := endingToString(c, "bad CERT Certificate", f) - if e1 != nil { - return nil, e1, c1 - } - rr.Certificate = s - return rr, nil, c1 -} - -func setOPENPGPKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(OPENPGPKEY) - rr.Hdr = h - - s, e, c1 := endingToString(c, "bad OPENPGPKEY PublicKey", f) - if e != nil { - return nil, e, c1 - } - rr.PublicKey = s - return rr, nil, c1 -} - -func setSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - r, e, s := setRRSIG(h, c, o, f) - if r != nil { - return &SIG{*r.(*RRSIG)}, e, s - } - return nil, e, s -} - -func setRRSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(RRSIG) - rr.Hdr = h - l := <-c - if l.length == 0 { - return rr, nil, l.comment - } - if t, ok := StringToType[l.tokenUpper]; !ok { - if strings.HasPrefix(l.tokenUpper, "TYPE") { - t, ok = typeToInt(l.tokenUpper) - if !ok { - return nil, &ParseError{f, "bad RRSIG Typecovered", l}, "" - } - rr.TypeCovered = t - } else { - return nil, &ParseError{f, "bad RRSIG Typecovered", l}, "" - } - } else { - rr.TypeCovered = t - } - <-c // zBlank - l = <-c - i, err := strconv.Atoi(l.token) - if err != nil || l.err { - return nil, &ParseError{f, "bad RRSIG Algorithm", l}, "" - } - rr.Algorithm = uint8(i) - <-c // zBlank - l = <-c - i, err = strconv.Atoi(l.token) - if err != nil || l.err { - return nil, &ParseError{f, "bad RRSIG Labels", l}, "" - } - rr.Labels = uint8(i) - <-c // zBlank - l = <-c - i, err = strconv.Atoi(l.token) - if err != nil || l.err { - return nil, &ParseError{f, "bad RRSIG OrigTtl", l}, "" - } - rr.OrigTtl = uint32(i) - <-c // zBlank - l = <-c - if i, err := StringToTime(l.token); err != nil { - // Try to see if all numeric and use it as epoch - if i, err := strconv.ParseInt(l.token, 10, 64); err == nil { - // TODO(miek): error out on > MAX_UINT32, same below - rr.Expiration = uint32(i) - } else { - return nil, &ParseError{f, "bad RRSIG Expiration", l}, "" - } - } else { - rr.Expiration = i - } - <-c // zBlank - l = <-c - if i, err := StringToTime(l.token); err != nil { - if i, err := strconv.ParseInt(l.token, 10, 64); err == nil { - rr.Inception = uint32(i) - } else { - return nil, &ParseError{f, "bad RRSIG Inception", l}, "" - } - } else { - rr.Inception = i - } - <-c // zBlank - l = <-c - i, err = strconv.Atoi(l.token) - if err != nil || l.err { - return nil, &ParseError{f, "bad RRSIG KeyTag", l}, "" - } - rr.KeyTag = uint16(i) - <-c // zBlank - l 
= <-c - rr.SignerName = l.token - if l.token == "@" { - rr.SignerName = o - } else { - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad RRSIG SignerName", l}, "" - } - if rr.SignerName[l.length-1] != '.' { - rr.SignerName = appendOrigin(rr.SignerName, o) - } - } - s, e, c1 := endingToString(c, "bad RRSIG Signature", f) - if e != nil { - return nil, e, c1 - } - rr.Signature = s - return rr, nil, c1 -} - -func setNSEC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(NSEC) - rr.Hdr = h - - l := <-c - rr.NextDomain = l.token - if l.length == 0 { - return rr, nil, l.comment - } - if l.token == "@" { - rr.NextDomain = o - } else { - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad NSEC NextDomain", l}, "" - } - if rr.NextDomain[l.length-1] != '.' { - rr.NextDomain = appendOrigin(rr.NextDomain, o) - } - } - - rr.TypeBitMap = make([]uint16, 0) - var ( - k uint16 - ok bool - ) - l = <-c - for l.value != zNewline && l.value != zEOF { - switch l.value { - case zBlank: - // Ok - case zString: - if k, ok = StringToType[l.tokenUpper]; !ok { - if k, ok = typeToInt(l.tokenUpper); !ok { - return nil, &ParseError{f, "bad NSEC TypeBitMap", l}, "" - } - } - rr.TypeBitMap = append(rr.TypeBitMap, k) - default: - return nil, &ParseError{f, "bad NSEC TypeBitMap", l}, "" - } - l = <-c - } - return rr, nil, l.comment -} - -func setNSEC3(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(NSEC3) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, l.comment - } - i, e := strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad NSEC3 Hash", l}, "" - } - rr.Hash = uint8(i) - <-c // zBlank - l = <-c - i, e = strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad NSEC3 Flags", l}, "" - } - rr.Flags = uint8(i) - <-c // zBlank - l = <-c - i, e = strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad NSEC3 Iterations", l}, "" - } - rr.Iterations = uint16(i) - <-c - l = <-c - if len(l.token) == 0 || l.err { - return nil, &ParseError{f, "bad NSEC3 Salt", l}, "" - } - rr.SaltLength = uint8(len(l.token)) / 2 - rr.Salt = l.token - - <-c - l = <-c - if len(l.token) == 0 || l.err { - return nil, &ParseError{f, "bad NSEC3 NextDomain", l}, "" - } - rr.HashLength = 20 // Fix for NSEC3 (sha1 160 bits) - rr.NextDomain = l.token - - rr.TypeBitMap = make([]uint16, 0) - var ( - k uint16 - ok bool - ) - l = <-c - for l.value != zNewline && l.value != zEOF { - switch l.value { - case zBlank: - // Ok - case zString: - if k, ok = StringToType[l.tokenUpper]; !ok { - if k, ok = typeToInt(l.tokenUpper); !ok { - return nil, &ParseError{f, "bad NSEC3 TypeBitMap", l}, "" - } - } - rr.TypeBitMap = append(rr.TypeBitMap, k) - default: - return nil, &ParseError{f, "bad NSEC3 TypeBitMap", l}, "" - } - l = <-c - } - return rr, nil, l.comment -} - -func setNSEC3PARAM(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(NSEC3PARAM) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad NSEC3PARAM Hash", l}, "" - } - rr.Hash = uint8(i) - <-c // zBlank - l = <-c - i, e = strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad NSEC3PARAM Flags", l}, "" - } - rr.Flags = uint8(i) - <-c // zBlank - l = <-c - i, e = strconv.Atoi(l.token) - if e != nil || l.err { 
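// Aside (sketch): the NSEC/NSEC3 type bit maps above accept mnemonics
// and TYPExxx escapes alike, through StringToType and typeToInt, e.g.
//
//	alfa.example.org. IN NSEC host.example.org. A MX RRSIG NSEC TYPE1234
//
// parses with TypeBitMap [1 15 46 47 1234].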
- return nil, &ParseError{f, "bad NSEC3PARAM Iterations", l}, "" - } - rr.Iterations = uint16(i) - <-c - l = <-c - rr.SaltLength = uint8(len(l.token)) - rr.Salt = l.token - return rr, nil, "" -} - -func setEUI48(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(EUI48) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - if l.length != 17 || l.err { - return nil, &ParseError{f, "bad EUI48 Address", l}, "" - } - addr := make([]byte, 12) - dash := 0 - for i := 0; i < 10; i += 2 { - addr[i] = l.token[i+dash] - addr[i+1] = l.token[i+1+dash] - dash++ - if l.token[i+1+dash] != '-' { - return nil, &ParseError{f, "bad EUI48 Address", l}, "" - } - } - addr[10] = l.token[15] - addr[11] = l.token[16] - - i, e := strconv.ParseUint(string(addr), 16, 48) - if e != nil { - return nil, &ParseError{f, "bad EUI48 Address", l}, "" - } - rr.Address = i - return rr, nil, "" -} - -func setEUI64(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(EUI64) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - if l.length != 23 || l.err { - return nil, &ParseError{f, "bad EUI64 Address", l}, "" - } - addr := make([]byte, 16) - dash := 0 - for i := 0; i < 14; i += 2 { - addr[i] = l.token[i+dash] - addr[i+1] = l.token[i+1+dash] - dash++ - if l.token[i+1+dash] != '-' { - return nil, &ParseError{f, "bad EUI64 Address", l}, "" - } - } - addr[14] = l.token[21] - addr[15] = l.token[22] - - i, e := strconv.ParseUint(string(addr), 16, 64) - if e != nil { - return nil, &ParseError{f, "bad EUI68 Address", l}, "" - } - rr.Address = uint64(i) - return rr, nil, "" -} - -func setSSHFP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(SSHFP) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad SSHFP Algorithm", l}, "" - } - rr.Algorithm = uint8(i) - <-c // zBlank - l = <-c - i, e = strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad SSHFP Type", l}, "" - } - rr.Type = uint8(i) - <-c // zBlank - s, e1, c1 := endingToString(c, "bad SSHFP Fingerprint", f) - if e1 != nil { - return nil, e1, c1 - } - rr.FingerPrint = s - return rr, nil, "" -} - -func setDNSKEYs(h RR_Header, c chan lex, o, f, typ string) (RR, *ParseError, string) { - rr := new(DNSKEY) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, l.comment - } - i, e := strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad " + typ + " Flags", l}, "" - } - rr.Flags = uint16(i) - <-c // zBlank - l = <-c // zString - i, e = strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad " + typ + " Protocol", l}, "" - } - rr.Protocol = uint8(i) - <-c // zBlank - l = <-c // zString - i, e = strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad " + typ + " Algorithm", l}, "" - } - rr.Algorithm = uint8(i) - s, e1, c1 := endingToString(c, "bad "+typ+" PublicKey", f) - if e1 != nil { - return nil, e1, c1 - } - rr.PublicKey = s - return rr, nil, c1 -} - -func setKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - r, e, s := setDNSKEYs(h, c, o, f, "KEY") - if r != nil { - return &KEY{*r.(*DNSKEY)}, e, s - } - return nil, e, s -} - -func setDNSKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - r, e, s := setDNSKEYs(h, c, o, f, "DNSKEY") - return r, e, s -} - -func setCDNSKEY(h RR_Header, c chan lex, o, f string) 
(RR, *ParseError, string) { - r, e, s := setDNSKEYs(h, c, o, f, "CDNSKEY") - if r != nil { - return &CDNSKEY{*r.(*DNSKEY)}, e, s - } - return nil, e, s -} - -func setRKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(RKEY) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, l.comment - } - i, e := strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad RKEY Flags", l}, "" - } - rr.Flags = uint16(i) - <-c // zBlank - l = <-c // zString - i, e = strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad RKEY Protocol", l}, "" - } - rr.Protocol = uint8(i) - <-c // zBlank - l = <-c // zString - i, e = strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad RKEY Algorithm", l}, "" - } - rr.Algorithm = uint8(i) - s, e1, c1 := endingToString(c, "bad RKEY PublicKey", f) - if e1 != nil { - return nil, e1, c1 - } - rr.PublicKey = s - return rr, nil, c1 -} - -func setEID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(EID) - rr.Hdr = h - s, e, c1 := endingToString(c, "bad EID Endpoint", f) - if e != nil { - return nil, e, c1 - } - rr.Endpoint = s - return rr, nil, c1 -} - -func setNIMLOC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(NIMLOC) - rr.Hdr = h - s, e, c1 := endingToString(c, "bad NIMLOC Locator", f) - if e != nil { - return nil, e, c1 - } - rr.Locator = s - return rr, nil, c1 -} - -func setGPOS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(GPOS) - rr.Hdr = h - l := <-c - if l.length == 0 { - return rr, nil, "" - } - _, e := strconv.ParseFloat(l.token, 64) - if e != nil || l.err { - return nil, &ParseError{f, "bad GPOS Longitude", l}, "" - } - rr.Longitude = l.token - <-c // zBlank - l = <-c - _, e = strconv.ParseFloat(l.token, 64) - if e != nil || l.err { - return nil, &ParseError{f, "bad GPOS Latitude", l}, "" - } - rr.Latitude = l.token - <-c // zBlank - l = <-c - _, e = strconv.ParseFloat(l.token, 64) - if e != nil || l.err { - return nil, &ParseError{f, "bad GPOS Altitude", l}, "" - } - rr.Altitude = l.token - return rr, nil, "" -} - -func setDSs(h RR_Header, c chan lex, o, f, typ string) (RR, *ParseError, string) { - rr := new(DS) - rr.Hdr = h - l := <-c - if l.length == 0 { - return rr, nil, l.comment - } - i, e := strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad " + typ + " KeyTag", l}, "" - } - rr.KeyTag = uint16(i) - <-c // zBlank - l = <-c - if i, e := strconv.Atoi(l.token); e != nil { - i, ok := StringToAlgorithm[l.tokenUpper] - if !ok || l.err { - return nil, &ParseError{f, "bad " + typ + " Algorithm", l}, "" - } - rr.Algorithm = i - } else { - rr.Algorithm = uint8(i) - } - <-c // zBlank - l = <-c - i, e = strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad " + typ + " DigestType", l}, "" - } - rr.DigestType = uint8(i) - s, e1, c1 := endingToString(c, "bad "+typ+" Digest", f) - if e1 != nil { - return nil, e1, c1 - } - rr.Digest = s - return rr, nil, c1 -} - -func setDS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - r, e, s := setDSs(h, c, o, f, "DS") - return r, e, s -} - -func setDLV(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - r, e, s := setDSs(h, c, o, f, "DLV") - if r != nil { - return &DLV{*r.(*DS)}, e, s - } - return nil, e, s -} - -func setCDS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - r, e, s := setDSs(h, c, o, f, "CDS") - if r != nil 
{ - return &CDS{*r.(*DS)}, e, s - } - return nil, e, s -} - -func setTA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(TA) - rr.Hdr = h - l := <-c - if l.length == 0 { - return rr, nil, l.comment - } - i, e := strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad TA KeyTag", l}, "" - } - rr.KeyTag = uint16(i) - <-c // zBlank - l = <-c - if i, e := strconv.Atoi(l.token); e != nil { - i, ok := StringToAlgorithm[l.tokenUpper] - if !ok || l.err { - return nil, &ParseError{f, "bad TA Algorithm", l}, "" - } - rr.Algorithm = i - } else { - rr.Algorithm = uint8(i) - } - <-c // zBlank - l = <-c - i, e = strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad TA DigestType", l}, "" - } - rr.DigestType = uint8(i) - s, e, c1 := endingToString(c, "bad TA Digest", f) - if e != nil { - return nil, e.(*ParseError), c1 - } - rr.Digest = s - return rr, nil, c1 -} - -func setTLSA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(TLSA) - rr.Hdr = h - l := <-c - if l.length == 0 { - return rr, nil, l.comment - } - i, e := strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad TLSA Usage", l}, "" - } - rr.Usage = uint8(i) - <-c // zBlank - l = <-c - i, e = strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad TLSA Selector", l}, "" - } - rr.Selector = uint8(i) - <-c // zBlank - l = <-c - i, e = strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad TLSA MatchingType", l}, "" - } - rr.MatchingType = uint8(i) - // So this needs be e2 (i.e. different than e), because...??t - s, e2, c1 := endingToString(c, "bad TLSA Certificate", f) - if e2 != nil { - return nil, e2, c1 - } - rr.Certificate = s - return rr, nil, c1 -} - -func setSMIMEA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(SMIMEA) - rr.Hdr = h - l := <-c - if l.length == 0 { - return rr, nil, l.comment - } - i, e := strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad SMIMEA Usage", l}, "" - } - rr.Usage = uint8(i) - <-c // zBlank - l = <-c - i, e = strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad SMIMEA Selector", l}, "" - } - rr.Selector = uint8(i) - <-c // zBlank - l = <-c - i, e = strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad SMIMEA MatchingType", l}, "" - } - rr.MatchingType = uint8(i) - // So this needs be e2 (i.e. 
different than e), because...??t - s, e2, c1 := endingToString(c, "bad SMIMEA Certificate", f) - if e2 != nil { - return nil, e2, c1 - } - rr.Certificate = s - return rr, nil, c1 -} - -func setRFC3597(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(RFC3597) - rr.Hdr = h - l := <-c - if l.token != "\\#" { - return nil, &ParseError{f, "bad RFC3597 Rdata", l}, "" - } - <-c // zBlank - l = <-c - rdlength, e := strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad RFC3597 Rdata ", l}, "" - } - - s, e1, c1 := endingToString(c, "bad RFC3597 Rdata", f) - if e1 != nil { - return nil, e1, c1 - } - if rdlength*2 != len(s) { - return nil, &ParseError{f, "bad RFC3597 Rdata", l}, "" - } - rr.Rdata = s - return rr, nil, c1 -} - -func setSPF(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(SPF) - rr.Hdr = h - - s, e, c1 := endingToTxtSlice(c, "bad SPF Txt", f) - if e != nil { - return nil, e, "" - } - rr.Txt = s - return rr, nil, c1 -} - -func setTXT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(TXT) - rr.Hdr = h - - // no zBlank reading here, because all this rdata is TXT - s, e, c1 := endingToTxtSlice(c, "bad TXT Txt", f) - if e != nil { - return nil, e, "" - } - rr.Txt = s - return rr, nil, c1 -} - -// identical to setTXT -func setNINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(NINFO) - rr.Hdr = h - - s, e, c1 := endingToTxtSlice(c, "bad NINFO ZSData", f) - if e != nil { - return nil, e, "" - } - rr.ZSData = s - return rr, nil, c1 -} - -func setURI(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(URI) - rr.Hdr = h - - l := <-c - if l.length == 0 { // Dynamic updates. - return rr, nil, "" - } - - i, e := strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad URI Priority", l}, "" - } - rr.Priority = uint16(i) - <-c // zBlank - l = <-c - i, e = strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad URI Weight", l}, "" - } - rr.Weight = uint16(i) - - <-c // zBlank - s, err, c1 := endingToTxtSlice(c, "bad URI Target", f) - if err != nil { - return nil, err, "" - } - if len(s) > 1 { - return nil, &ParseError{f, "bad URI Target", l}, "" - } - rr.Target = s[0] - return rr, nil, c1 -} - -func setDHCID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - // awesome record to parse! 
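	// What "awesome" means in practice: the rest of the line is slurped as a
	// single base64 blob via endingToString, with no per-field validation.
	// A minimal round-trip sketch through the public API (the record data
	// below is just a syntactically valid base64 placeholder, assumed for
	// illustration):
	//
	//	rr, err := dns.NewRR("client.example.com. 3600 IN DHCID AAIBY2/AuCccgoJbsaxcQc9TUapptP69lOjxfNuVAA2kjEA=")
	//	if err == nil {
	//		fmt.Println(rr.(*dns.DHCID).Digest) // the raw base64 payload
	//	}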
- rr := new(DHCID) - rr.Hdr = h - - s, e, c1 := endingToString(c, "bad DHCID Digest", f) - if e != nil { - return nil, e, c1 - } - rr.Digest = s - return rr, nil, c1 -} - -func setNID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(NID) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad NID Preference", l}, "" - } - rr.Preference = uint16(i) - <-c // zBlank - l = <-c // zString - u, err := stringToNodeID(l) - if err != nil || l.err { - return nil, err, "" - } - rr.NodeID = u - return rr, nil, "" -} - -func setL32(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(L32) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad L32 Preference", l}, "" - } - rr.Preference = uint16(i) - <-c // zBlank - l = <-c // zString - rr.Locator32 = net.ParseIP(l.token) - if rr.Locator32 == nil || l.err { - return nil, &ParseError{f, "bad L32 Locator", l}, "" - } - return rr, nil, "" -} - -func setLP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(LP) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad LP Preference", l}, "" - } - rr.Preference = uint16(i) - <-c // zBlank - l = <-c // zString - rr.Fqdn = l.token - if l.length == 0 { - return rr, nil, "" - } - if l.token == "@" { - rr.Fqdn = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad LP Fqdn", l}, "" - } - if rr.Fqdn[l.length-1] != '.' 
{ - rr.Fqdn = appendOrigin(rr.Fqdn, o) - } - return rr, nil, "" -} - -func setL64(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(L64) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad L64 Preference", l}, "" - } - rr.Preference = uint16(i) - <-c // zBlank - l = <-c // zString - u, err := stringToNodeID(l) - if err != nil || l.err { - return nil, err, "" - } - rr.Locator64 = u - return rr, nil, "" -} - -func setUID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(UID) - rr.Hdr = h - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad UID Uid", l}, "" - } - rr.Uid = uint32(i) - return rr, nil, "" -} - -func setGID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(GID) - rr.Hdr = h - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad GID Gid", l}, "" - } - rr.Gid = uint32(i) - return rr, nil, "" -} - -func setUINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(UINFO) - rr.Hdr = h - s, e, c1 := endingToTxtSlice(c, "bad UINFO Uinfo", f) - if e != nil { - return nil, e, "" - } - rr.Uinfo = s[0] // silently discard anything above - return rr, nil, c1 -} - -func setPX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(PX) - rr.Hdr = h - - l := <-c - if l.length == 0 { - return rr, nil, "" - } - i, e := strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad PX Preference", l}, "" - } - rr.Preference = uint16(i) - <-c // zBlank - l = <-c // zString - rr.Map822 = l.token - if l.length == 0 { - return rr, nil, "" - } - if l.token == "@" { - rr.Map822 = o - return rr, nil, "" - } - _, ok := IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad PX Map822", l}, "" - } - if rr.Map822[l.length-1] != '.' { - rr.Map822 = appendOrigin(rr.Map822, o) - } - <-c // zBlank - l = <-c // zString - rr.Mapx400 = l.token - if l.token == "@" { - rr.Mapx400 = o - return rr, nil, "" - } - _, ok = IsDomainName(l.token) - if !ok || l.length == 0 || l.err { - return nil, &ParseError{f, "bad PX Mapx400", l}, "" - } - if rr.Mapx400[l.length-1] != '.' 
{ - rr.Mapx400 = appendOrigin(rr.Mapx400, o) - } - return rr, nil, "" -} - -func setCAA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { - rr := new(CAA) - rr.Hdr = h - l := <-c - if l.length == 0 { - return rr, nil, l.comment - } - i, err := strconv.Atoi(l.token) - if err != nil || l.err { - return nil, &ParseError{f, "bad CAA Flag", l}, "" - } - rr.Flag = uint8(i) - - <-c // zBlank - l = <-c // zString - if l.value != zString { - return nil, &ParseError{f, "bad CAA Tag", l}, "" - } - rr.Tag = l.token - - <-c // zBlank - s, e, c1 := endingToTxtSlice(c, "bad CAA Value", f) - if e != nil { - return nil, e, "" - } - if len(s) > 1 { - return nil, &ParseError{f, "bad CAA Value", l}, "" - } - rr.Value = s[0] - return rr, nil, c1 -} - -var typeToparserFunc = map[uint16]parserFunc{ - TypeAAAA: {setAAAA, false}, - TypeAFSDB: {setAFSDB, false}, - TypeA: {setA, false}, - TypeCAA: {setCAA, true}, - TypeCDS: {setCDS, true}, - TypeCDNSKEY: {setCDNSKEY, true}, - TypeCERT: {setCERT, true}, - TypeCNAME: {setCNAME, false}, - TypeDHCID: {setDHCID, true}, - TypeDLV: {setDLV, true}, - TypeDNAME: {setDNAME, false}, - TypeKEY: {setKEY, true}, - TypeDNSKEY: {setDNSKEY, true}, - TypeDS: {setDS, true}, - TypeEID: {setEID, true}, - TypeEUI48: {setEUI48, false}, - TypeEUI64: {setEUI64, false}, - TypeGID: {setGID, false}, - TypeGPOS: {setGPOS, false}, - TypeHINFO: {setHINFO, true}, - TypeHIP: {setHIP, true}, - TypeKX: {setKX, false}, - TypeL32: {setL32, false}, - TypeL64: {setL64, false}, - TypeLOC: {setLOC, true}, - TypeLP: {setLP, false}, - TypeMB: {setMB, false}, - TypeMD: {setMD, false}, - TypeMF: {setMF, false}, - TypeMG: {setMG, false}, - TypeMINFO: {setMINFO, false}, - TypeMR: {setMR, false}, - TypeMX: {setMX, false}, - TypeNAPTR: {setNAPTR, false}, - TypeNID: {setNID, false}, - TypeNIMLOC: {setNIMLOC, true}, - TypeNINFO: {setNINFO, true}, - TypeNSAPPTR: {setNSAPPTR, false}, - TypeNSEC3PARAM: {setNSEC3PARAM, false}, - TypeNSEC3: {setNSEC3, true}, - TypeNSEC: {setNSEC, true}, - TypeNS: {setNS, false}, - TypeOPENPGPKEY: {setOPENPGPKEY, true}, - TypePTR: {setPTR, false}, - TypePX: {setPX, false}, - TypeSIG: {setSIG, true}, - TypeRKEY: {setRKEY, true}, - TypeRP: {setRP, false}, - TypeRRSIG: {setRRSIG, true}, - TypeRT: {setRT, false}, - TypeSMIMEA: {setSMIMEA, true}, - TypeSOA: {setSOA, false}, - TypeSPF: {setSPF, true}, - TypeSRV: {setSRV, false}, - TypeSSHFP: {setSSHFP, true}, - TypeTALINK: {setTALINK, false}, - TypeTA: {setTA, true}, - TypeTLSA: {setTLSA, true}, - TypeTXT: {setTXT, true}, - TypeUID: {setUID, false}, - TypeUINFO: {setUINFO, true}, - TypeURI: {setURI, true}, - TypeX25: {setX25, false}, -} diff --git a/vendor/github.com/miekg/dns/scanner.go b/vendor/github.com/miekg/dns/scanner.go deleted file mode 100644 index c29bc2f3..00000000 --- a/vendor/github.com/miekg/dns/scanner.go +++ /dev/null @@ -1,43 +0,0 @@ -package dns - -// Implement a simple scanner, return a byte stream from an io reader. - -import ( - "bufio" - "io" - "text/scanner" -) - -type scan struct { - src *bufio.Reader - position scanner.Position - eof bool // Have we just seen a eof -} - -func scanInit(r io.Reader) *scan { - s := new(scan) - s.src = bufio.NewReader(r) - s.position.Line = 1 - return s -} - -// tokenText returns the next byte from the input -func (s *scan) tokenText() (byte, error) { - c, err := s.src.ReadByte() - if err != nil { - return c, err - } - // delay the newline handling until the next token is delivered, - // fixes off-by-one errors when reporting a parse error. 
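	// A worked trace of the delayed bookkeeping (input "a\nb", all values
	// illustrative):
	//
	//	read 'a'  -> reported at Line 1, Column 1
	//	read '\n' -> eof flag set, still reported at Line 1
	//	read 'b'  -> flag consumed first: Line 2, Column 1
	//
	// Incrementing Line when '\n' itself is returned would blame errors in
	// the token that ends at the newline on the following line.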
- if s.eof == true { - s.position.Line++ - s.position.Column = 0 - s.eof = false - } - if c == '\n' { - s.eof = true - return c, nil - } - s.position.Column++ - return c, nil -} diff --git a/vendor/github.com/miekg/dns/server.go b/vendor/github.com/miekg/dns/server.go deleted file mode 100644 index 1d40ee56..00000000 --- a/vendor/github.com/miekg/dns/server.go +++ /dev/null @@ -1,734 +0,0 @@ -// DNS server implementation. - -package dns - -import ( - "bytes" - "crypto/tls" - "encoding/binary" - "io" - "net" - "sync" - "time" -) - -// Maximum number of TCP queries before we close the socket. -const maxTCPQueries = 128 - -// Handler is implemented by any value that implements ServeDNS. -type Handler interface { - ServeDNS(w ResponseWriter, r *Msg) -} - -// A ResponseWriter interface is used by an DNS handler to -// construct an DNS response. -type ResponseWriter interface { - // LocalAddr returns the net.Addr of the server - LocalAddr() net.Addr - // RemoteAddr returns the net.Addr of the client that sent the current request. - RemoteAddr() net.Addr - // WriteMsg writes a reply back to the client. - WriteMsg(*Msg) error - // Write writes a raw buffer back to the client. - Write([]byte) (int, error) - // Close closes the connection. - Close() error - // TsigStatus returns the status of the Tsig. - TsigStatus() error - // TsigTimersOnly sets the tsig timers only boolean. - TsigTimersOnly(bool) - // Hijack lets the caller take over the connection. - // After a call to Hijack(), the DNS package will not do anything with the connection. - Hijack() -} - -type response struct { - hijacked bool // connection has been hijacked by handler - tsigStatus error - tsigTimersOnly bool - tsigRequestMAC string - tsigSecret map[string]string // the tsig secrets - udp *net.UDPConn // i/o connection if UDP was used - tcp net.Conn // i/o connection if TCP was used - udpSession *SessionUDP // oob data to get egress interface right - remoteAddr net.Addr // address of the client - writer Writer // writer to output the raw DNS bits -} - -// ServeMux is an DNS request multiplexer. It matches the -// zone name of each incoming request against a list of -// registered patterns add calls the handler for the pattern -// that most closely matches the zone name. ServeMux is DNSSEC aware, meaning -// that queries for the DS record are redirected to the parent zone (if that -// is also registered), otherwise the child gets the query. -// ServeMux is also safe for concurrent access from multiple goroutines. -type ServeMux struct { - z map[string]Handler - m *sync.RWMutex -} - -// NewServeMux allocates and returns a new ServeMux. -func NewServeMux() *ServeMux { return &ServeMux{z: make(map[string]Handler), m: new(sync.RWMutex)} } - -// DefaultServeMux is the default ServeMux used by Serve. -var DefaultServeMux = NewServeMux() - -// The HandlerFunc type is an adapter to allow the use of -// ordinary functions as DNS handlers. If f is a function -// with the appropriate signature, HandlerFunc(f) is a -// Handler object that calls f. -type HandlerFunc func(ResponseWriter, *Msg) - -// ServeDNS calls f(w, r). -func (f HandlerFunc) ServeDNS(w ResponseWriter, r *Msg) { - f(w, r) -} - -// HandleFailed returns a HandlerFunc that returns SERVFAIL for every request it gets. 
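// Because HandleFailed already has the func(ResponseWriter, *Msg) shape,
// it can be registered directly as a catch-all; a usage sketch (port and
// network are assumptions for illustration):
//
//	dns.HandleFunc(".", dns.HandleFailed) // SERVFAIL anything unmatched
//	_ = dns.ListenAndServe(":5353", "udp", nil)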
-func HandleFailed(w ResponseWriter, r *Msg) { - m := new(Msg) - m.SetRcode(r, RcodeServerFailure) - // does not matter if this write fails - w.WriteMsg(m) -} - -func failedHandler() Handler { return HandlerFunc(HandleFailed) } - -// ListenAndServe Starts a server on address and network specified Invoke handler -// for incoming queries. -func ListenAndServe(addr string, network string, handler Handler) error { - server := &Server{Addr: addr, Net: network, Handler: handler} - return server.ListenAndServe() -} - -// ListenAndServeTLS acts like http.ListenAndServeTLS, more information in -// http://golang.org/pkg/net/http/#ListenAndServeTLS -func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error { - cert, err := tls.LoadX509KeyPair(certFile, keyFile) - if err != nil { - return err - } - - config := tls.Config{ - Certificates: []tls.Certificate{cert}, - } - - server := &Server{ - Addr: addr, - Net: "tcp-tls", - TLSConfig: &config, - Handler: handler, - } - - return server.ListenAndServe() -} - -// ActivateAndServe activates a server with a listener from systemd, -// l and p should not both be non-nil. -// If both l and p are not nil only p will be used. -// Invoke handler for incoming queries. -func ActivateAndServe(l net.Listener, p net.PacketConn, handler Handler) error { - server := &Server{Listener: l, PacketConn: p, Handler: handler} - return server.ActivateAndServe() -} - -func (mux *ServeMux) match(q string, t uint16) Handler { - mux.m.RLock() - defer mux.m.RUnlock() - var handler Handler - b := make([]byte, len(q)) // worst case, one label of length q - off := 0 - end := false - for { - l := len(q[off:]) - for i := 0; i < l; i++ { - b[i] = q[off+i] - if b[i] >= 'A' && b[i] <= 'Z' { - b[i] |= ('a' - 'A') - } - } - if h, ok := mux.z[string(b[:l])]; ok { // causes garbage, might want to change the map key - if t != TypeDS { - return h - } - // Continue for DS to see if we have a parent too, if so delegeate to the parent - handler = h - } - off, end = NextLabel(q, off) - if end { - break - } - } - // Wildcard match, if we have found nothing try the root zone as a last resort. - if h, ok := mux.z["."]; ok { - return h - } - return handler -} - -// Handle adds a handler to the ServeMux for pattern. -func (mux *ServeMux) Handle(pattern string, handler Handler) { - if pattern == "" { - panic("dns: invalid pattern " + pattern) - } - mux.m.Lock() - mux.z[Fqdn(pattern)] = handler - mux.m.Unlock() -} - -// HandleFunc adds a handler function to the ServeMux for pattern. -func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) { - mux.Handle(pattern, HandlerFunc(handler)) -} - -// HandleRemove deregistrars the handler specific for pattern from the ServeMux. -func (mux *ServeMux) HandleRemove(pattern string) { - if pattern == "" { - panic("dns: invalid pattern " + pattern) - } - mux.m.Lock() - delete(mux.z, Fqdn(pattern)) - mux.m.Unlock() -} - -// ServeDNS dispatches the request to the handler whose -// pattern most closely matches the request message. If DefaultServeMux -// is used the correct thing for DS queries is done: a possible parent -// is sought. -// If no handler is found a standard SERVFAIL message is returned -// If the request message does not have exactly one question in the -// question section a SERVFAIL is returned, unlesss Unsafe is true. 
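// A sketch of those matching rules (zone names assumed for illustration):
//
//	mux := dns.NewServeMux()
//	mux.Handle("example.org.", childHandler)
//	mux.Handle("org.", parentHandler)
//
// A query for "www.example.org. IN A" reaches childHandler (longest
// matching zone), while "example.org. IN DS" is handed to parentHandler:
// DS records are served from the parent side of a delegation.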
-func (mux *ServeMux) ServeDNS(w ResponseWriter, request *Msg) { - var h Handler - if len(request.Question) < 1 { // allow more than one question - h = failedHandler() - } else { - if h = mux.match(request.Question[0].Name, request.Question[0].Qtype); h == nil { - h = failedHandler() - } - } - h.ServeDNS(w, request) -} - -// Handle registers the handler with the given pattern -// in the DefaultServeMux. The documentation for -// ServeMux explains how patterns are matched. -func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) } - -// HandleRemove deregisters the handle with the given pattern -// in the DefaultServeMux. -func HandleRemove(pattern string) { DefaultServeMux.HandleRemove(pattern) } - -// HandleFunc registers the handler function with the given pattern -// in the DefaultServeMux. -func HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) { - DefaultServeMux.HandleFunc(pattern, handler) -} - -// Writer writes raw DNS messages; each call to Write should send an entire message. -type Writer interface { - io.Writer -} - -// Reader reads raw DNS messages; each call to ReadTCP or ReadUDP should return an entire message. -type Reader interface { - // ReadTCP reads a raw message from a TCP connection. Implementations may alter - // connection properties, for example the read-deadline. - ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error) - // ReadUDP reads a raw message from a UDP connection. Implementations may alter - // connection properties, for example the read-deadline. - ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) -} - -// defaultReader is an adapter for the Server struct that implements the Reader interface -// using the readTCP and readUDP func of the embedded Server. -type defaultReader struct { - *Server -} - -func (dr *defaultReader) ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error) { - return dr.readTCP(conn, timeout) -} - -func (dr *defaultReader) ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) { - return dr.readUDP(conn, timeout) -} - -// DecorateReader is a decorator hook for extending or supplanting the functionality of a Reader. -// Implementations should never return a nil Reader. -type DecorateReader func(Reader) Reader - -// DecorateWriter is a decorator hook for extending or supplanting the functionality of a Writer. -// Implementations should never return a nil Writer. -type DecorateWriter func(Writer) Writer - -// A Server defines parameters for running an DNS server. -type Server struct { - // Address to listen on, ":dns" if empty. - Addr string - // if "tcp" or "tcp-tls" (DNS over TLS) it will invoke a TCP listener, otherwise an UDP one - Net string - // TCP Listener to use, this is to aid in systemd's socket activation. - Listener net.Listener - // TLS connection configuration - TLSConfig *tls.Config - // UDP "Listener" to use, this is to aid in systemd's socket activation. - PacketConn net.PacketConn - // Handler to invoke, dns.DefaultServeMux if nil. - Handler Handler - // Default buffer size to use to read incoming UDP messages. If not set - // it defaults to MinMsgSize (512 B). - UDPSize int - // The net.Conn.SetReadTimeout value for new connections, defaults to 2 * time.Second. - ReadTimeout time.Duration - // The net.Conn.SetWriteTimeout value for new connections, defaults to 2 * time.Second. - WriteTimeout time.Duration - // TCP idle timeout for multiple queries, if nil, defaults to 8 * time.Second (RFC 5966). 
- IdleTimeout func() time.Duration - // Secret(s) for Tsig map[]. - TsigSecret map[string]string - // Unsafe instructs the server to disregard any sanity checks and directly hand the message to - // the handler. It will specifically not check if the query has the QR bit not set. - Unsafe bool - // If NotifyStartedFunc is set it is called once the server has started listening. - NotifyStartedFunc func() - // DecorateReader is optional, allows customization of the process that reads raw DNS messages. - DecorateReader DecorateReader - // DecorateWriter is optional, allows customization of the process that writes raw DNS messages. - DecorateWriter DecorateWriter - - // Graceful shutdown handling - - inFlight sync.WaitGroup - - lock sync.RWMutex - started bool -} - -// ListenAndServe starts a nameserver on the configured address in *Server. -func (srv *Server) ListenAndServe() error { - srv.lock.Lock() - defer srv.lock.Unlock() - if srv.started { - return &Error{err: "server already started"} - } - addr := srv.Addr - if addr == "" { - addr = ":domain" - } - if srv.UDPSize == 0 { - srv.UDPSize = MinMsgSize - } - switch srv.Net { - case "tcp", "tcp4", "tcp6": - a, err := net.ResolveTCPAddr(srv.Net, addr) - if err != nil { - return err - } - l, err := net.ListenTCP(srv.Net, a) - if err != nil { - return err - } - srv.Listener = l - srv.started = true - srv.lock.Unlock() - err = srv.serveTCP(l) - srv.lock.Lock() // to satisfy the defer at the top - return err - case "tcp-tls", "tcp4-tls", "tcp6-tls": - network := "tcp" - if srv.Net == "tcp4-tls" { - network = "tcp4" - } else if srv.Net == "tcp6" { - network = "tcp6" - } - - l, err := tls.Listen(network, addr, srv.TLSConfig) - if err != nil { - return err - } - srv.Listener = l - srv.started = true - srv.lock.Unlock() - err = srv.serveTCP(l) - srv.lock.Lock() // to satisfy the defer at the top - return err - case "udp", "udp4", "udp6": - a, err := net.ResolveUDPAddr(srv.Net, addr) - if err != nil { - return err - } - l, err := net.ListenUDP(srv.Net, a) - if err != nil { - return err - } - if e := setUDPSocketOptions(l); e != nil { - return e - } - srv.PacketConn = l - srv.started = true - srv.lock.Unlock() - err = srv.serveUDP(l) - srv.lock.Lock() // to satisfy the defer at the top - return err - } - return &Error{err: "bad network"} -} - -// ActivateAndServe starts a nameserver with the PacketConn or Listener -// configured in *Server. Its main use is to start a server from systemd. -func (srv *Server) ActivateAndServe() error { - srv.lock.Lock() - defer srv.lock.Unlock() - if srv.started { - return &Error{err: "server already started"} - } - pConn := srv.PacketConn - l := srv.Listener - if pConn != nil { - if srv.UDPSize == 0 { - srv.UDPSize = MinMsgSize - } - // Check PacketConn interface's type is valid and value - // is not nil - if t, ok := pConn.(*net.UDPConn); ok && t != nil { - if e := setUDPSocketOptions(t); e != nil { - return e - } - srv.started = true - srv.lock.Unlock() - e := srv.serveUDP(t) - srv.lock.Lock() // to satisfy the defer at the top - return e - } - } - if l != nil { - srv.started = true - srv.lock.Unlock() - e := srv.serveTCP(l) - srv.lock.Lock() // to satisfy the defer at the top - return e - } - return &Error{err: "bad listeners"} -} - -// Shutdown gracefully shuts down a server. After a call to Shutdown, ListenAndServe and -// ActivateAndServe will return. All in progress queries are completed before the server -// is taken down. If the Shutdown is taking longer than the reading timeout an error -// is returned. 
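// A minimal lifecycle sketch (address assumed for illustration):
//
//	srv := &dns.Server{Addr: "127.0.0.1:5353", Net: "udp"}
//	go func() { _ = srv.ListenAndServe() }()
//	// ... handle traffic ...
//	if err := srv.Shutdown(); err != nil {
//		// in-flight queries outlived the read timeout
//		log.Println(err)
//	}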
-func (srv *Server) Shutdown() error { - srv.lock.Lock() - if !srv.started { - srv.lock.Unlock() - return &Error{err: "server not started"} - } - srv.started = false - srv.lock.Unlock() - - if srv.PacketConn != nil { - srv.PacketConn.Close() - } - if srv.Listener != nil { - srv.Listener.Close() - } - - fin := make(chan bool) - go func() { - srv.inFlight.Wait() - fin <- true - }() - - select { - case <-time.After(srv.getReadTimeout()): - return &Error{err: "server shutdown is pending"} - case <-fin: - return nil - } -} - -// getReadTimeout is a helper func to use system timeout if server did not intend to change it. -func (srv *Server) getReadTimeout() time.Duration { - rtimeout := dnsTimeout - if srv.ReadTimeout != 0 { - rtimeout = srv.ReadTimeout - } - return rtimeout -} - -// serveTCP starts a TCP listener for the server. -// Each request is handled in a separate goroutine. -func (srv *Server) serveTCP(l net.Listener) error { - defer l.Close() - - if srv.NotifyStartedFunc != nil { - srv.NotifyStartedFunc() - } - - reader := Reader(&defaultReader{srv}) - if srv.DecorateReader != nil { - reader = srv.DecorateReader(reader) - } - - handler := srv.Handler - if handler == nil { - handler = DefaultServeMux - } - rtimeout := srv.getReadTimeout() - // deadline is not used here - for { - rw, err := l.Accept() - if err != nil { - if neterr, ok := err.(net.Error); ok && neterr.Temporary() { - continue - } - return err - } - m, err := reader.ReadTCP(rw, rtimeout) - srv.lock.RLock() - if !srv.started { - srv.lock.RUnlock() - return nil - } - srv.lock.RUnlock() - if err != nil { - continue - } - srv.inFlight.Add(1) - go srv.serve(rw.RemoteAddr(), handler, m, nil, nil, rw) - } -} - -// serveUDP starts a UDP listener for the server. -// Each request is handled in a separate goroutine. -func (srv *Server) serveUDP(l *net.UDPConn) error { - defer l.Close() - - if srv.NotifyStartedFunc != nil { - srv.NotifyStartedFunc() - } - - reader := Reader(&defaultReader{srv}) - if srv.DecorateReader != nil { - reader = srv.DecorateReader(reader) - } - - handler := srv.Handler - if handler == nil { - handler = DefaultServeMux - } - rtimeout := srv.getReadTimeout() - // deadline is not used here - for { - m, s, err := reader.ReadUDP(l, rtimeout) - srv.lock.RLock() - if !srv.started { - srv.lock.RUnlock() - return nil - } - srv.lock.RUnlock() - if err != nil { - continue - } - srv.inFlight.Add(1) - go srv.serve(s.RemoteAddr(), handler, m, l, s, nil) - } -} - -// Serve a new connection. 
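// serve also records TSIG verification in w.tsigStatus, so handlers can
// branch on it. A TSIG-aware handler sketch (key name and algorithm are
// assumptions for illustration):
//
//	func handle(w dns.ResponseWriter, r *dns.Msg) {
//		m := new(dns.Msg)
//		m.SetReply(r)
//		if r.IsTsig() != nil && w.TsigStatus() == nil { // signed and verified
//			m.SetTsig("axfr.example.", dns.HmacSHA256, 300, time.Now().Unix())
//		}
//		w.WriteMsg(m)
//	}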
-func (srv *Server) serve(a net.Addr, h Handler, m []byte, u *net.UDPConn, s *SessionUDP, t net.Conn) { - defer srv.inFlight.Done() - - w := &response{tsigSecret: srv.TsigSecret, udp: u, tcp: t, remoteAddr: a, udpSession: s} - if srv.DecorateWriter != nil { - w.writer = srv.DecorateWriter(w) - } else { - w.writer = w - } - - q := 0 // counter for the amount of TCP queries we get - - reader := Reader(&defaultReader{srv}) - if srv.DecorateReader != nil { - reader = srv.DecorateReader(reader) - } -Redo: - req := new(Msg) - err := req.Unpack(m) - if err != nil { // Send a FormatError back - x := new(Msg) - x.SetRcodeFormatError(req) - w.WriteMsg(x) - goto Exit - } - if !srv.Unsafe && req.Response { - goto Exit - } - - w.tsigStatus = nil - if w.tsigSecret != nil { - if t := req.IsTsig(); t != nil { - secret := t.Hdr.Name - if _, ok := w.tsigSecret[secret]; !ok { - w.tsigStatus = ErrKeyAlg - } - w.tsigStatus = TsigVerify(m, w.tsigSecret[secret], "", false) - w.tsigTimersOnly = false - w.tsigRequestMAC = req.Extra[len(req.Extra)-1].(*TSIG).MAC - } - } - h.ServeDNS(w, req) // Writes back to the client - -Exit: - if w.tcp == nil { - return - } - // TODO(miek): make this number configurable? - if q > maxTCPQueries { // close socket after this many queries - w.Close() - return - } - - if w.hijacked { - return // client calls Close() - } - if u != nil { // UDP, "close" and return - w.Close() - return - } - idleTimeout := tcpIdleTimeout - if srv.IdleTimeout != nil { - idleTimeout = srv.IdleTimeout() - } - m, err = reader.ReadTCP(w.tcp, idleTimeout) - if err == nil { - q++ - goto Redo - } - w.Close() - return -} - -func (srv *Server) readTCP(conn net.Conn, timeout time.Duration) ([]byte, error) { - conn.SetReadDeadline(time.Now().Add(timeout)) - l := make([]byte, 2) - n, err := conn.Read(l) - if err != nil || n != 2 { - if err != nil { - return nil, err - } - return nil, ErrShortRead - } - length := binary.BigEndian.Uint16(l) - if length == 0 { - return nil, ErrShortRead - } - m := make([]byte, int(length)) - n, err = conn.Read(m[:int(length)]) - if err != nil || n == 0 { - if err != nil { - return nil, err - } - return nil, ErrShortRead - } - i := n - for i < int(length) { - j, err := conn.Read(m[i:int(length)]) - if err != nil { - return nil, err - } - i += j - } - n = i - m = m[:n] - return m, nil -} - -func (srv *Server) readUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) { - conn.SetReadDeadline(time.Now().Add(timeout)) - m := make([]byte, srv.UDPSize) - n, s, err := ReadFromSessionUDP(conn, m) - if err != nil || n == 0 { - if err != nil { - return nil, nil, err - } - return nil, nil, ErrShortRead - } - m = m[:n] - return m, s, nil -} - -// WriteMsg implements the ResponseWriter.WriteMsg method. -func (w *response) WriteMsg(m *Msg) (err error) { - var data []byte - if w.tsigSecret != nil { // if no secrets, dont check for the tsig (which is a longer check) - if t := m.IsTsig(); t != nil { - data, w.tsigRequestMAC, err = TsigGenerate(m, w.tsigSecret[t.Hdr.Name], w.tsigRequestMAC, w.tsigTimersOnly) - if err != nil { - return err - } - _, err = w.writer.Write(data) - return err - } - } - data, err = m.Pack() - if err != nil { - return err - } - _, err = w.writer.Write(data) - return err -} - -// Write implements the ResponseWriter.Write method. 
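// The TCP branch below applies RFC 1035 section 4.2.2 framing: a two-byte
// big-endian length prefix before the message. The same framing by hand,
// as a sketch:
//
//	frame := make([]byte, 2, 2+len(msg))
//	binary.BigEndian.PutUint16(frame, uint16(len(msg)))
//	frame = append(frame, msg...) // ready for conn.Write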
-func (w *response) Write(m []byte) (int, error) { - switch { - case w.udp != nil: - n, err := WriteToSessionUDP(w.udp, m, w.udpSession) - return n, err - case w.tcp != nil: - lm := len(m) - if lm < 2 { - return 0, io.ErrShortBuffer - } - if lm > MaxMsgSize { - return 0, &Error{err: "message too large"} - } - l := make([]byte, 2, 2+lm) - binary.BigEndian.PutUint16(l, uint16(lm)) - m = append(l, m...) - - n, err := io.Copy(w.tcp, bytes.NewReader(m)) - return int(n), err - } - panic("not reached") -} - -// LocalAddr implements the ResponseWriter.LocalAddr method. -func (w *response) LocalAddr() net.Addr { - if w.tcp != nil { - return w.tcp.LocalAddr() - } - return w.udp.LocalAddr() -} - -// RemoteAddr implements the ResponseWriter.RemoteAddr method. -func (w *response) RemoteAddr() net.Addr { return w.remoteAddr } - -// TsigStatus implements the ResponseWriter.TsigStatus method. -func (w *response) TsigStatus() error { return w.tsigStatus } - -// TsigTimersOnly implements the ResponseWriter.TsigTimersOnly method. -func (w *response) TsigTimersOnly(b bool) { w.tsigTimersOnly = b } - -// Hijack implements the ResponseWriter.Hijack method. -func (w *response) Hijack() { w.hijacked = true } - -// Close implements the ResponseWriter.Close method -func (w *response) Close() error { - // Can't close the udp conn, as that is actually the listener. - if w.tcp != nil { - e := w.tcp.Close() - w.tcp = nil - return e - } - return nil -} diff --git a/vendor/github.com/miekg/dns/sig0.go b/vendor/github.com/miekg/dns/sig0.go deleted file mode 100644 index 2dce06af..00000000 --- a/vendor/github.com/miekg/dns/sig0.go +++ /dev/null @@ -1,219 +0,0 @@ -package dns - -import ( - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "crypto/rsa" - "encoding/binary" - "math/big" - "strings" - "time" -) - -// Sign signs a dns.Msg. It fills the signature with the appropriate data. -// The SIG record should have the SignerName, KeyTag, Algorithm, Inception -// and Expiration set. -func (rr *SIG) Sign(k crypto.Signer, m *Msg) ([]byte, error) { - if k == nil { - return nil, ErrPrivKey - } - if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { - return nil, ErrKey - } - rr.Header().Rrtype = TypeSIG - rr.Header().Class = ClassANY - rr.Header().Ttl = 0 - rr.Header().Name = "." - rr.OrigTtl = 0 - rr.TypeCovered = 0 - rr.Labels = 0 - - buf := make([]byte, m.Len()+rr.len()) - mbuf, err := m.PackBuffer(buf) - if err != nil { - return nil, err - } - if &buf[0] != &mbuf[0] { - return nil, ErrBuf - } - off, err := PackRR(rr, buf, len(mbuf), nil, false) - if err != nil { - return nil, err - } - buf = buf[:off:cap(buf)] - - hash, ok := AlgorithmToHash[rr.Algorithm] - if !ok { - return nil, ErrAlg - } - - hasher := hash.New() - // Write SIG rdata - hasher.Write(buf[len(mbuf)+1+2+2+4+2:]) - // Write message - hasher.Write(buf[:len(mbuf)]) - - signature, err := sign(k, hasher.Sum(nil), hash, rr.Algorithm) - if err != nil { - return nil, err - } - - rr.Signature = toBase64(signature) - sig := string(signature) - - buf = append(buf, sig...) - if len(buf) > int(^uint16(0)) { - return nil, ErrBuf - } - // Adjust sig data length - rdoff := len(mbuf) + 1 + 2 + 2 + 4 - rdlen := binary.BigEndian.Uint16(buf[rdoff:]) - rdlen += uint16(len(sig)) - binary.BigEndian.PutUint16(buf[rdoff:], rdlen) - // Adjust additional count - adc := binary.BigEndian.Uint16(buf[10:]) - adc++ - binary.BigEndian.PutUint16(buf[10:], adc) - return buf, nil -} - -// Verify validates the message buf using the key k. 
-// It's assumed that buf is a valid message from which rr was unpacked. -func (rr *SIG) Verify(k *KEY, buf []byte) error { - if k == nil { - return ErrKey - } - if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { - return ErrKey - } - - var hash crypto.Hash - switch rr.Algorithm { - case DSA, RSASHA1: - hash = crypto.SHA1 - case RSASHA256, ECDSAP256SHA256: - hash = crypto.SHA256 - case ECDSAP384SHA384: - hash = crypto.SHA384 - case RSASHA512: - hash = crypto.SHA512 - default: - return ErrAlg - } - hasher := hash.New() - - buflen := len(buf) - qdc := binary.BigEndian.Uint16(buf[4:]) - anc := binary.BigEndian.Uint16(buf[6:]) - auc := binary.BigEndian.Uint16(buf[8:]) - adc := binary.BigEndian.Uint16(buf[10:]) - offset := 12 - var err error - for i := uint16(0); i < qdc && offset < buflen; i++ { - _, offset, err = UnpackDomainName(buf, offset) - if err != nil { - return err - } - // Skip past Type and Class - offset += 2 + 2 - } - for i := uint16(1); i < anc+auc+adc && offset < buflen; i++ { - _, offset, err = UnpackDomainName(buf, offset) - if err != nil { - return err - } - // Skip past Type, Class and TTL - offset += 2 + 2 + 4 - if offset+1 >= buflen { - continue - } - var rdlen uint16 - rdlen = binary.BigEndian.Uint16(buf[offset:]) - offset += 2 - offset += int(rdlen) - } - if offset >= buflen { - return &Error{err: "overflowing unpacking signed message"} - } - - // offset should be just prior to SIG - bodyend := offset - // owner name SHOULD be root - _, offset, err = UnpackDomainName(buf, offset) - if err != nil { - return err - } - // Skip Type, Class, TTL, RDLen - offset += 2 + 2 + 4 + 2 - sigstart := offset - // Skip Type Covered, Algorithm, Labels, Original TTL - offset += 2 + 1 + 1 + 4 - if offset+4+4 >= buflen { - return &Error{err: "overflow unpacking signed message"} - } - expire := binary.BigEndian.Uint32(buf[offset:]) - offset += 4 - incept := binary.BigEndian.Uint32(buf[offset:]) - offset += 4 - now := uint32(time.Now().Unix()) - if now < incept || now > expire { - return ErrTime - } - // Skip key tag - offset += 2 - var signername string - signername, offset, err = UnpackDomainName(buf, offset) - if err != nil { - return err - } - // If key has come from the DNS name compression might - // have mangled the case of the name - if strings.ToLower(signername) != strings.ToLower(k.Header().Name) { - return &Error{err: "signer name doesn't match key name"} - } - sigend := offset - hasher.Write(buf[sigstart:sigend]) - hasher.Write(buf[:10]) - hasher.Write([]byte{ - byte((adc - 1) << 8), - byte(adc - 1), - }) - hasher.Write(buf[12:bodyend]) - - hashed := hasher.Sum(nil) - sig := buf[sigend:] - switch k.Algorithm { - case DSA: - pk := k.publicKeyDSA() - sig = sig[1:] - r := big.NewInt(0) - r.SetBytes(sig[:len(sig)/2]) - s := big.NewInt(0) - s.SetBytes(sig[len(sig)/2:]) - if pk != nil { - if dsa.Verify(pk, hashed, r, s) { - return nil - } - return ErrSig - } - case RSASHA1, RSASHA256, RSASHA512: - pk := k.publicKeyRSA() - if pk != nil { - return rsa.VerifyPKCS1v15(pk, hash, hashed, sig) - } - case ECDSAP256SHA256, ECDSAP384SHA384: - pk := k.publicKeyECDSA() - r := big.NewInt(0) - r.SetBytes(sig[:len(sig)/2]) - s := big.NewInt(0) - s.SetBytes(sig[len(sig)/2:]) - if pk != nil { - if ecdsa.Verify(pk, hashed, r, s) { - return nil - } - return ErrSig - } - } - return ErrKeyAlg -} diff --git a/vendor/github.com/miekg/dns/singleinflight.go b/vendor/github.com/miekg/dns/singleinflight.go deleted file mode 100644 index 9573c7d0..00000000 --- 
a/vendor/github.com/miekg/dns/singleinflight.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Adapted for dns package usage by Miek Gieben. - -package dns - -import "sync" -import "time" - -// call is an in-flight or completed singleflight.Do call -type call struct { - wg sync.WaitGroup - val *Msg - rtt time.Duration - err error - dups int -} - -// singleflight represents a class of work and forms a namespace in -// which units of work can be executed with duplicate suppression. -type singleflight struct { - sync.Mutex // protects m - m map[string]*call // lazily initialized -} - -// Do executes and returns the results of the given function, making -// sure that only one execution is in-flight for a given key at a -// time. If a duplicate comes in, the duplicate caller waits for the -// original to complete and receives the same results. -// The return value shared indicates whether v was given to multiple callers. -func (g *singleflight) Do(key string, fn func() (*Msg, time.Duration, error)) (v *Msg, rtt time.Duration, err error, shared bool) { - g.Lock() - if g.m == nil { - g.m = make(map[string]*call) - } - if c, ok := g.m[key]; ok { - c.dups++ - g.Unlock() - c.wg.Wait() - return c.val, c.rtt, c.err, true - } - c := new(call) - c.wg.Add(1) - g.m[key] = c - g.Unlock() - - c.val, c.rtt, c.err = fn() - c.wg.Done() - - g.Lock() - delete(g.m, key) - g.Unlock() - - return c.val, c.rtt, c.err, c.dups > 0 -} diff --git a/vendor/github.com/miekg/dns/smimea.go b/vendor/github.com/miekg/dns/smimea.go deleted file mode 100644 index 3a4bb570..00000000 --- a/vendor/github.com/miekg/dns/smimea.go +++ /dev/null @@ -1,47 +0,0 @@ -package dns - -import ( - "crypto/sha256" - "crypto/x509" - "encoding/hex" -) - -// Sign creates a SMIMEA record from an SSL certificate. -func (r *SMIMEA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) { - r.Hdr.Rrtype = TypeSMIMEA - r.Usage = uint8(usage) - r.Selector = uint8(selector) - r.MatchingType = uint8(matchingType) - - r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert) - if err != nil { - return err - } - return nil -} - -// Verify verifies a SMIMEA record against an SSL certificate. If it is OK -// a nil error is returned. -func (r *SMIMEA) Verify(cert *x509.Certificate) error { - c, err := CertificateToDANE(r.Selector, r.MatchingType, cert) - if err != nil { - return err // Not also ErrSig? - } - if r.Certificate == c { - return nil - } - return ErrSig // ErrSig, really? -} - -// SIMEAName returns the ownername of a SMIMEA resource record as per the -// format specified in RFC 'draft-ietf-dane-smime-12' Section 2 and 3 -func SMIMEAName(email_address string, domain_name string) (string, error) { - hasher := sha256.New() - hasher.Write([]byte(email_address)) - - // RFC Section 3: "The local-part is hashed using the SHA2-256 - // algorithm with the hash truncated to 28 octets and - // represented in its hexadecimal representation to become the - // left-most label in the prepared domain name" - return hex.EncodeToString(hasher.Sum(nil)[:28]) + "." + "_smimecert." 
+ domain_name, nil -} diff --git a/vendor/github.com/miekg/dns/tlsa.go b/vendor/github.com/miekg/dns/tlsa.go deleted file mode 100644 index 431e2fb5..00000000 --- a/vendor/github.com/miekg/dns/tlsa.go +++ /dev/null @@ -1,47 +0,0 @@ -package dns - -import ( - "crypto/x509" - "net" - "strconv" -) - -// Sign creates a TLSA record from an SSL certificate. -func (r *TLSA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) { - r.Hdr.Rrtype = TypeTLSA - r.Usage = uint8(usage) - r.Selector = uint8(selector) - r.MatchingType = uint8(matchingType) - - r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert) - if err != nil { - return err - } - return nil -} - -// Verify verifies a TLSA record against an SSL certificate. If it is OK -// a nil error is returned. -func (r *TLSA) Verify(cert *x509.Certificate) error { - c, err := CertificateToDANE(r.Selector, r.MatchingType, cert) - if err != nil { - return err // Not also ErrSig? - } - if r.Certificate == c { - return nil - } - return ErrSig // ErrSig, really? -} - -// TLSAName returns the ownername of a TLSA resource record as per the -// rules specified in RFC 6698, Section 3. -func TLSAName(name, service, network string) (string, error) { - if !IsFqdn(name) { - return "", ErrFqdn - } - p, err := net.LookupPort(network, service) - if err != nil { - return "", err - } - return "_" + strconv.Itoa(p) + "._" + network + "." + name, nil -} diff --git a/vendor/github.com/miekg/dns/tsig.go b/vendor/github.com/miekg/dns/tsig.go deleted file mode 100644 index 78365e1c..00000000 --- a/vendor/github.com/miekg/dns/tsig.go +++ /dev/null @@ -1,384 +0,0 @@ -package dns - -import ( - "crypto/hmac" - "crypto/md5" - "crypto/sha1" - "crypto/sha256" - "crypto/sha512" - "encoding/binary" - "encoding/hex" - "hash" - "io" - "strconv" - "strings" - "time" -) - -// HMAC hashing codes. These are transmitted as domain names. -const ( - HmacMD5 = "hmac-md5.sig-alg.reg.int." - HmacSHA1 = "hmac-sha1." - HmacSHA256 = "hmac-sha256." - HmacSHA512 = "hmac-sha512." -) - -// TSIG is the RR the holds the transaction signature of a message. -// See RFC 2845 and RFC 4635. -type TSIG struct { - Hdr RR_Header - Algorithm string `dns:"domain-name"` - TimeSigned uint64 `dns:"uint48"` - Fudge uint16 - MACSize uint16 - MAC string `dns:"size-hex:MACSize"` - OrigId uint16 - Error uint16 - OtherLen uint16 - OtherData string `dns:"size-hex:OtherLen"` -} - -// TSIG has no official presentation format, but this will suffice. - -func (rr *TSIG) String() string { - s := "\n;; TSIG PSEUDOSECTION:\n" - s += rr.Hdr.String() + - " " + rr.Algorithm + - " " + tsigTimeToString(rr.TimeSigned) + - " " + strconv.Itoa(int(rr.Fudge)) + - " " + strconv.Itoa(int(rr.MACSize)) + - " " + strings.ToUpper(rr.MAC) + - " " + strconv.Itoa(int(rr.OrigId)) + - " " + strconv.Itoa(int(rr.Error)) + // BIND prints NOERROR - " " + strconv.Itoa(int(rr.OtherLen)) + - " " + rr.OtherData - return s -} - -// The following values must be put in wireformat, so that the MAC can be calculated. -// RFC 2845, section 3.4.2. TSIG Variables. -type tsigWireFmt struct { - // From RR_Header - Name string `dns:"domain-name"` - Class uint16 - Ttl uint32 - // Rdata of the TSIG - Algorithm string `dns:"domain-name"` - TimeSigned uint64 `dns:"uint48"` - Fudge uint16 - // MACSize, MAC and OrigId excluded - Error uint16 - OtherLen uint16 - OtherData string `dns:"size-hex:OtherLen"` -} - -// If we have the MAC use this type to convert it to wiredata. Section 3.4.3. 
Request MAC -type macWireFmt struct { - MACSize uint16 - MAC string `dns:"size-hex:MACSize"` -} - -// 3.3. Time values used in TSIG calculations -type timerWireFmt struct { - TimeSigned uint64 `dns:"uint48"` - Fudge uint16 -} - -// TsigGenerate fills out the TSIG record attached to the message. -// The message should contain -// a "stub" TSIG RR with the algorithm, key name (owner name of the RR), -// time fudge (defaults to 300 seconds) and the current time -// The TSIG MAC is saved in that Tsig RR. -// When TsigGenerate is called for the first time requestMAC is set to the empty string and -// timersOnly is false. -// If something goes wrong an error is returned, otherwise it is nil. -func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, string, error) { - if m.IsTsig() == nil { - panic("dns: TSIG not last RR in additional") - } - // If we barf here, the caller is to blame - rawsecret, err := fromBase64([]byte(secret)) - if err != nil { - return nil, "", err - } - - rr := m.Extra[len(m.Extra)-1].(*TSIG) - m.Extra = m.Extra[0 : len(m.Extra)-1] // kill the TSIG from the msg - mbuf, err := m.Pack() - if err != nil { - return nil, "", err - } - buf := tsigBuffer(mbuf, rr, requestMAC, timersOnly) - - t := new(TSIG) - var h hash.Hash - switch strings.ToLower(rr.Algorithm) { - case HmacMD5: - h = hmac.New(md5.New, []byte(rawsecret)) - case HmacSHA1: - h = hmac.New(sha1.New, []byte(rawsecret)) - case HmacSHA256: - h = hmac.New(sha256.New, []byte(rawsecret)) - case HmacSHA512: - h = hmac.New(sha512.New, []byte(rawsecret)) - default: - return nil, "", ErrKeyAlg - } - io.WriteString(h, string(buf)) - t.MAC = hex.EncodeToString(h.Sum(nil)) - t.MACSize = uint16(len(t.MAC) / 2) // Size is half! - - t.Hdr = RR_Header{Name: rr.Hdr.Name, Rrtype: TypeTSIG, Class: ClassANY, Ttl: 0} - t.Fudge = rr.Fudge - t.TimeSigned = rr.TimeSigned - t.Algorithm = rr.Algorithm - t.OrigId = m.Id - - tbuf := make([]byte, t.len()) - if off, err := PackRR(t, tbuf, 0, nil, false); err == nil { - tbuf = tbuf[:off] // reset to actual size used - } else { - return nil, "", err - } - mbuf = append(mbuf, tbuf...) - // Update the ArCount directly in the buffer. - binary.BigEndian.PutUint16(mbuf[10:], uint16(len(m.Extra)+1)) - - return mbuf, t.MAC, nil -} - -// TsigVerify verifies the TSIG on a message. -// If the signature does not validate err contains the -// error, otherwise it is nil. -func TsigVerify(msg []byte, secret, requestMAC string, timersOnly bool) error { - rawsecret, err := fromBase64([]byte(secret)) - if err != nil { - return err - } - // Strip the TSIG from the incoming msg - stripped, tsig, err := stripTsig(msg) - if err != nil { - return err - } - - msgMAC, err := hex.DecodeString(tsig.MAC) - if err != nil { - return err - } - - buf := tsigBuffer(stripped, tsig, requestMAC, timersOnly) - - // Fudge factor works both ways. A message can arrive before it was signed because - // of clock skew. 
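	// Worked example (values assumed): with TimeSigned = T and Fudge = 300,
	// the absolute-difference check below accepts verification at any time
	// in [T-300, T+300], so a message that appears to be signed up to five
	// minutes "in the future" still verifies:
	//
	//	ti := now - tsig.TimeSigned
	//	if now < tsig.TimeSigned {
	//		ti = tsig.TimeSigned - now // skew in the other direction
	//	}
	//	withinFudge := ti <= uint64(tsig.Fudge) // 300s window on both sides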
- now := uint64(time.Now().Unix()) - ti := now - tsig.TimeSigned - if now < tsig.TimeSigned { - ti = tsig.TimeSigned - now - } - if uint64(tsig.Fudge) < ti { - return ErrTime - } - - var h hash.Hash - switch strings.ToLower(tsig.Algorithm) { - case HmacMD5: - h = hmac.New(md5.New, rawsecret) - case HmacSHA1: - h = hmac.New(sha1.New, rawsecret) - case HmacSHA256: - h = hmac.New(sha256.New, rawsecret) - case HmacSHA512: - h = hmac.New(sha512.New, rawsecret) - default: - return ErrKeyAlg - } - h.Write(buf) - if !hmac.Equal(h.Sum(nil), msgMAC) { - return ErrSig - } - return nil -} - -// Create a wiredata buffer for the MAC calculation. -func tsigBuffer(msgbuf []byte, rr *TSIG, requestMAC string, timersOnly bool) []byte { - var buf []byte - if rr.TimeSigned == 0 { - rr.TimeSigned = uint64(time.Now().Unix()) - } - if rr.Fudge == 0 { - rr.Fudge = 300 // Standard (RFC) default. - } - - if requestMAC != "" { - m := new(macWireFmt) - m.MACSize = uint16(len(requestMAC) / 2) - m.MAC = requestMAC - buf = make([]byte, len(requestMAC)) // long enough - n, _ := packMacWire(m, buf) - buf = buf[:n] - } - - tsigvar := make([]byte, DefaultMsgSize) - if timersOnly { - tsig := new(timerWireFmt) - tsig.TimeSigned = rr.TimeSigned - tsig.Fudge = rr.Fudge - n, _ := packTimerWire(tsig, tsigvar) - tsigvar = tsigvar[:n] - } else { - tsig := new(tsigWireFmt) - tsig.Name = strings.ToLower(rr.Hdr.Name) - tsig.Class = ClassANY - tsig.Ttl = rr.Hdr.Ttl - tsig.Algorithm = strings.ToLower(rr.Algorithm) - tsig.TimeSigned = rr.TimeSigned - tsig.Fudge = rr.Fudge - tsig.Error = rr.Error - tsig.OtherLen = rr.OtherLen - tsig.OtherData = rr.OtherData - n, _ := packTsigWire(tsig, tsigvar) - tsigvar = tsigvar[:n] - } - - if requestMAC != "" { - x := append(buf, msgbuf...) - buf = append(x, tsigvar...) - } else { - buf = append(msgbuf, tsigvar...) - } - return buf -} - -// Strip the TSIG from the raw message. -func stripTsig(msg []byte) ([]byte, *TSIG, error) { - // Copied from msg.go's Unpack() Header, but modified. - var ( - dh Header - err error - ) - off, tsigoff := 0, 0 - - if dh, off, err = unpackMsgHdr(msg, off); err != nil { - return nil, nil, err - } - if dh.Arcount == 0 { - return nil, nil, ErrNoSig - } - - // Rcode, see msg.go Unpack() - if int(dh.Bits&0xF) == RcodeNotAuth { - return nil, nil, ErrAuth - } - - for i := 0; i < int(dh.Qdcount); i++ { - _, off, err = unpackQuestion(msg, off) - if err != nil { - return nil, nil, err - } - } - - _, off, err = unpackRRslice(int(dh.Ancount), msg, off) - if err != nil { - return nil, nil, err - } - _, off, err = unpackRRslice(int(dh.Nscount), msg, off) - if err != nil { - return nil, nil, err - } - - rr := new(TSIG) - var extra RR - for i := 0; i < int(dh.Arcount); i++ { - tsigoff = off - extra, off, err = UnpackRR(msg, off) - if err != nil { - return nil, nil, err - } - if extra.Header().Rrtype == TypeTSIG { - rr = extra.(*TSIG) - // Adjust Arcount. - arcount := binary.BigEndian.Uint16(msg[10:]) - binary.BigEndian.PutUint16(msg[10:], arcount-1) - break - } - } - if rr == nil { - return nil, nil, ErrNoSig - } - return msg[:tsigoff], rr, nil -} - -// Translate the TSIG time signed into a date. There is no -// need for RFC1982 calculations as this date is 48 bits. 
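// A worked value (computed, not from the source): Unix time 1136239445
// is 2006-01-02 22:04:05 UTC, so
//
//	tsigTimeToString(1136239445) // "20060102220405"
//
// The layout string below is Go's reference time spelled as
// yyyymmddhhmmss; RFC 1982 serial arithmetic is unnecessary because a
// 48-bit seconds counter will not wrap for millions of years.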
-func tsigTimeToString(t uint64) string { - ti := time.Unix(int64(t), 0).UTC() - return ti.Format("20060102150405") -} - -func packTsigWire(tw *tsigWireFmt, msg []byte) (int, error) { - // copied from zmsg.go TSIG packing - // RR_Header - off, err := PackDomainName(tw.Name, msg, 0, nil, false) - if err != nil { - return off, err - } - off, err = packUint16(tw.Class, msg, off) - if err != nil { - return off, err - } - off, err = packUint32(tw.Ttl, msg, off) - if err != nil { - return off, err - } - - off, err = PackDomainName(tw.Algorithm, msg, off, nil, false) - if err != nil { - return off, err - } - off, err = packUint48(tw.TimeSigned, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(tw.Fudge, msg, off) - if err != nil { - return off, err - } - - off, err = packUint16(tw.Error, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(tw.OtherLen, msg, off) - if err != nil { - return off, err - } - off, err = packStringHex(tw.OtherData, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func packMacWire(mw *macWireFmt, msg []byte) (int, error) { - off, err := packUint16(mw.MACSize, msg, 0) - if err != nil { - return off, err - } - off, err = packStringHex(mw.MAC, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func packTimerWire(tw *timerWireFmt, msg []byte) (int, error) { - off, err := packUint48(tw.TimeSigned, msg, 0) - if err != nil { - return off, err - } - off, err = packUint16(tw.Fudge, msg, off) - if err != nil { - return off, err - } - return off, nil -} diff --git a/vendor/github.com/miekg/dns/types.go b/vendor/github.com/miekg/dns/types.go deleted file mode 100644 index f63a18b3..00000000 --- a/vendor/github.com/miekg/dns/types.go +++ /dev/null @@ -1,1294 +0,0 @@ -package dns - -import ( - "fmt" - "net" - "strconv" - "strings" - "time" -) - -type ( - // Type is a DNS type. - Type uint16 - // Class is a DNS class. - Class uint16 - // Name is a DNS domain name. - Name string -) - -// Packet formats - -// Wire constants and supported types. 
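// These values track the IANA DNS parameters registry; the Type and
// Class wrappers defined above print them symbolically. A small sketch:
//
//	dns.TypeAAAA == uint16(28)        // the raw wire value
//	dns.Type(dns.TypeAAAA).String()   // "AAAA"
//	dns.Class(dns.ClassINET).String() // "IN"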
-const (
-	// valid RR_Header.Rrtype and Question.qtype
-
-	TypeNone       uint16 = 0
-	TypeA          uint16 = 1
-	TypeNS         uint16 = 2
-	TypeMD         uint16 = 3
-	TypeMF         uint16 = 4
-	TypeCNAME      uint16 = 5
-	TypeSOA        uint16 = 6
-	TypeMB         uint16 = 7
-	TypeMG         uint16 = 8
-	TypeMR         uint16 = 9
-	TypeNULL       uint16 = 10
-	TypePTR        uint16 = 12
-	TypeHINFO      uint16 = 13
-	TypeMINFO      uint16 = 14
-	TypeMX         uint16 = 15
-	TypeTXT        uint16 = 16
-	TypeRP         uint16 = 17
-	TypeAFSDB      uint16 = 18
-	TypeX25        uint16 = 19
-	TypeISDN       uint16 = 20
-	TypeRT         uint16 = 21
-	TypeNSAPPTR    uint16 = 23
-	TypeSIG        uint16 = 24
-	TypeKEY        uint16 = 25
-	TypePX         uint16 = 26
-	TypeGPOS       uint16 = 27
-	TypeAAAA       uint16 = 28
-	TypeLOC        uint16 = 29
-	TypeNXT        uint16 = 30
-	TypeEID        uint16 = 31
-	TypeNIMLOC     uint16 = 32
-	TypeSRV        uint16 = 33
-	TypeATMA       uint16 = 34
-	TypeNAPTR      uint16 = 35
-	TypeKX         uint16 = 36
-	TypeCERT       uint16 = 37
-	TypeDNAME      uint16 = 39
-	TypeOPT        uint16 = 41 // EDNS
-	TypeDS         uint16 = 43
-	TypeSSHFP      uint16 = 44
-	TypeRRSIG      uint16 = 46
-	TypeNSEC       uint16 = 47
-	TypeDNSKEY     uint16 = 48
-	TypeDHCID      uint16 = 49
-	TypeNSEC3      uint16 = 50
-	TypeNSEC3PARAM uint16 = 51
-	TypeTLSA       uint16 = 52
-	TypeSMIMEA     uint16 = 53
-	TypeHIP        uint16 = 55
-	TypeNINFO      uint16 = 56
-	TypeRKEY       uint16 = 57
-	TypeTALINK     uint16 = 58
-	TypeCDS        uint16 = 59
-	TypeCDNSKEY    uint16 = 60
-	TypeOPENPGPKEY uint16 = 61
-	TypeSPF        uint16 = 99
-	TypeUINFO      uint16 = 100
-	TypeUID        uint16 = 101
-	TypeGID        uint16 = 102
-	TypeUNSPEC     uint16 = 103
-	TypeNID        uint16 = 104
-	TypeL32        uint16 = 105
-	TypeL64        uint16 = 106
-	TypeLP         uint16 = 107
-	TypeEUI48      uint16 = 108
-	TypeEUI64      uint16 = 109
-	TypeURI        uint16 = 256
-	TypeCAA        uint16 = 257
-
-	TypeTKEY uint16 = 249
-	TypeTSIG uint16 = 250
-
-	// valid Question.Qtype only
-	TypeIXFR  uint16 = 251
-	TypeAXFR  uint16 = 252
-	TypeMAILB uint16 = 253
-	TypeMAILA uint16 = 254
-	TypeANY   uint16 = 255
-
-	TypeTA       uint16 = 32768
-	TypeDLV      uint16 = 32769
-	TypeReserved uint16 = 65535
-
-	// valid Question.Qclass
-	ClassINET   = 1
-	ClassCSNET  = 2
-	ClassCHAOS  = 3
-	ClassHESIOD = 4
-	ClassNONE   = 254
-	ClassANY    = 255
-
-	// Message Response Codes.
-	RcodeSuccess        = 0
-	RcodeFormatError    = 1
-	RcodeServerFailure  = 2
-	RcodeNameError      = 3
-	RcodeNotImplemented = 4
-	RcodeRefused        = 5
-	RcodeYXDomain       = 6
-	RcodeYXRrset        = 7
-	RcodeNXRrset        = 8
-	RcodeNotAuth        = 9
-	RcodeNotZone        = 10
-	RcodeBadSig         = 16 // TSIG
-	RcodeBadVers        = 16 // EDNS0
-	RcodeBadKey         = 17
-	RcodeBadTime        = 18
-	RcodeBadMode        = 19 // TKEY
-	RcodeBadName        = 20
-	RcodeBadAlg         = 21
-	RcodeBadTrunc       = 22 // TSIG
-	RcodeBadCookie      = 23 // DNS Cookies
-
-	// Message Opcodes. There is no 3.
-	OpcodeQuery  = 0
-	OpcodeIQuery = 1
-	OpcodeStatus = 2
-	OpcodeNotify = 4
-	OpcodeUpdate = 5
-)
-
-// Header is the wire format for the DNS packet header.
-type Header struct {
-	Id                                 uint16
-	Bits                               uint16
-	Qdcount, Ancount, Nscount, Arcount uint16
-}
-
-const (
-	headerSize = 12
-
-	// Header.Bits
-	_QR = 1 << 15 // query/response (response=1)
-	_AA = 1 << 10 // authoritative
-	_TC = 1 << 9  // truncated
-	_RD = 1 << 8  // recursion desired
-	_RA = 1 << 7  // recursion available
-	_Z  = 1 << 6  // Z
-	_AD = 1 << 5  // authenticated data
-	_CD = 1 << 4  // checking disabled
-
-	LOC_EQUATOR       = 1 << 31 // RFC 1876, Section 2.
-	LOC_PRIMEMERIDIAN = 1 << 31 // RFC 1876, Section 2.
- - LOC_HOURS = 60 * 1000 - LOC_DEGREES = 60 * LOC_HOURS - - LOC_ALTITUDEBASE = 100000 -) - -// Different Certificate Types, see RFC 4398, Section 2.1 -const ( - CertPKIX = 1 + iota - CertSPKI - CertPGP - CertIPIX - CertISPKI - CertIPGP - CertACPKIX - CertIACPKIX - CertURI = 253 - CertOID = 254 -) - -// CertTypeToString converts the Cert Type to its string representation. -// See RFC 4398 and RFC 6944. -var CertTypeToString = map[uint16]string{ - CertPKIX: "PKIX", - CertSPKI: "SPKI", - CertPGP: "PGP", - CertIPIX: "IPIX", - CertISPKI: "ISPKI", - CertIPGP: "IPGP", - CertACPKIX: "ACPKIX", - CertIACPKIX: "IACPKIX", - CertURI: "URI", - CertOID: "OID", -} - -// StringToCertType is the reverseof CertTypeToString. -var StringToCertType = reverseInt16(CertTypeToString) - -//go:generate go run types_generate.go - -// Question holds a DNS question. There can be multiple questions in the -// question section of a message. Usually there is just one. -type Question struct { - Name string `dns:"cdomain-name"` // "cdomain-name" specifies encoding (and may be compressed) - Qtype uint16 - Qclass uint16 -} - -func (q *Question) len() int { - return len(q.Name) + 1 + 2 + 2 -} - -func (q *Question) String() (s string) { - // prefix with ; (as in dig) - s = ";" + sprintName(q.Name) + "\t" - s += Class(q.Qclass).String() + "\t" - s += " " + Type(q.Qtype).String() - return s -} - -// ANY is a wildcard record. See RFC 1035, Section 3.2.3. ANY -// is named "*" there. -type ANY struct { - Hdr RR_Header - // Does not have any rdata -} - -func (rr *ANY) String() string { return rr.Hdr.String() } - -type CNAME struct { - Hdr RR_Header - Target string `dns:"cdomain-name"` -} - -func (rr *CNAME) String() string { return rr.Hdr.String() + sprintName(rr.Target) } - -type HINFO struct { - Hdr RR_Header - Cpu string - Os string -} - -func (rr *HINFO) String() string { - return rr.Hdr.String() + sprintTxt([]string{rr.Cpu, rr.Os}) -} - -type MB struct { - Hdr RR_Header - Mb string `dns:"cdomain-name"` -} - -func (rr *MB) String() string { return rr.Hdr.String() + sprintName(rr.Mb) } - -type MG struct { - Hdr RR_Header - Mg string `dns:"cdomain-name"` -} - -func (rr *MG) String() string { return rr.Hdr.String() + sprintName(rr.Mg) } - -type MINFO struct { - Hdr RR_Header - Rmail string `dns:"cdomain-name"` - Email string `dns:"cdomain-name"` -} - -func (rr *MINFO) String() string { - return rr.Hdr.String() + sprintName(rr.Rmail) + " " + sprintName(rr.Email) -} - -type MR struct { - Hdr RR_Header - Mr string `dns:"cdomain-name"` -} - -func (rr *MR) String() string { - return rr.Hdr.String() + sprintName(rr.Mr) -} - -type MF struct { - Hdr RR_Header - Mf string `dns:"cdomain-name"` -} - -func (rr *MF) String() string { - return rr.Hdr.String() + sprintName(rr.Mf) -} - -type MD struct { - Hdr RR_Header - Md string `dns:"cdomain-name"` -} - -func (rr *MD) String() string { - return rr.Hdr.String() + sprintName(rr.Md) -} - -type MX struct { - Hdr RR_Header - Preference uint16 - Mx string `dns:"cdomain-name"` -} - -func (rr *MX) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Mx) -} - -type AFSDB struct { - Hdr RR_Header - Subtype uint16 - Hostname string `dns:"cdomain-name"` -} - -func (rr *AFSDB) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Subtype)) + " " + sprintName(rr.Hostname) -} - -type X25 struct { - Hdr RR_Header - PSDNAddress string -} - -func (rr *X25) String() string { - return rr.Hdr.String() + rr.PSDNAddress -} - -type RT struct { - Hdr RR_Header - 
Preference uint16 - Host string `dns:"cdomain-name"` -} - -func (rr *RT) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Host) -} - -type NS struct { - Hdr RR_Header - Ns string `dns:"cdomain-name"` -} - -func (rr *NS) String() string { - return rr.Hdr.String() + sprintName(rr.Ns) -} - -type PTR struct { - Hdr RR_Header - Ptr string `dns:"cdomain-name"` -} - -func (rr *PTR) String() string { - return rr.Hdr.String() + sprintName(rr.Ptr) -} - -type RP struct { - Hdr RR_Header - Mbox string `dns:"domain-name"` - Txt string `dns:"domain-name"` -} - -func (rr *RP) String() string { - return rr.Hdr.String() + rr.Mbox + " " + sprintTxt([]string{rr.Txt}) -} - -type SOA struct { - Hdr RR_Header - Ns string `dns:"cdomain-name"` - Mbox string `dns:"cdomain-name"` - Serial uint32 - Refresh uint32 - Retry uint32 - Expire uint32 - Minttl uint32 -} - -func (rr *SOA) String() string { - return rr.Hdr.String() + sprintName(rr.Ns) + " " + sprintName(rr.Mbox) + - " " + strconv.FormatInt(int64(rr.Serial), 10) + - " " + strconv.FormatInt(int64(rr.Refresh), 10) + - " " + strconv.FormatInt(int64(rr.Retry), 10) + - " " + strconv.FormatInt(int64(rr.Expire), 10) + - " " + strconv.FormatInt(int64(rr.Minttl), 10) -} - -type TXT struct { - Hdr RR_Header - Txt []string `dns:"txt"` -} - -func (rr *TXT) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) } - -func sprintName(s string) string { - src := []byte(s) - dst := make([]byte, 0, len(src)) - for i := 0; i < len(src); { - if i+1 < len(src) && src[i] == '\\' && src[i+1] == '.' { - dst = append(dst, src[i:i+2]...) - i += 2 - } else { - b, n := nextByte(src, i) - if n == 0 { - i++ // dangling back slash - } else if b == '.' { - dst = append(dst, b) - } else { - dst = appendDomainNameByte(dst, b) - } - i += n - } - } - return string(dst) -} - -func sprintTxtOctet(s string) string { - src := []byte(s) - dst := make([]byte, 0, len(src)) - dst = append(dst, '"') - for i := 0; i < len(src); { - if i+1 < len(src) && src[i] == '\\' && src[i+1] == '.' { - dst = append(dst, src[i:i+2]...) - i += 2 - } else { - b, n := nextByte(src, i) - if n == 0 { - i++ // dangling back slash - } else if b == '.' { - dst = append(dst, b) - } else { - if b < ' ' || b > '~' { - dst = appendByte(dst, b) - } else { - dst = append(dst, b) - } - } - i += n - } - } - dst = append(dst, '"') - return string(dst) -} - -func sprintTxt(txt []string) string { - var out []byte - for i, s := range txt { - if i > 0 { - out = append(out, ` "`...) 
- } else { - out = append(out, '"') - } - bs := []byte(s) - for j := 0; j < len(bs); { - b, n := nextByte(bs, j) - if n == 0 { - break - } - out = appendTXTStringByte(out, b) - j += n - } - out = append(out, '"') - } - return string(out) -} - -func appendDomainNameByte(s []byte, b byte) []byte { - switch b { - case '.', ' ', '\'', '@', ';', '(', ')': // additional chars to escape - return append(s, '\\', b) - } - return appendTXTStringByte(s, b) -} - -func appendTXTStringByte(s []byte, b byte) []byte { - switch b { - case '\t': - return append(s, '\\', 't') - case '\r': - return append(s, '\\', 'r') - case '\n': - return append(s, '\\', 'n') - case '"', '\\': - return append(s, '\\', b) - } - if b < ' ' || b > '~' { - return appendByte(s, b) - } - return append(s, b) -} - -func appendByte(s []byte, b byte) []byte { - var buf [3]byte - bufs := strconv.AppendInt(buf[:0], int64(b), 10) - s = append(s, '\\') - for i := 0; i < 3-len(bufs); i++ { - s = append(s, '0') - } - for _, r := range bufs { - s = append(s, r) - } - return s -} - -func nextByte(b []byte, offset int) (byte, int) { - if offset >= len(b) { - return 0, 0 - } - if b[offset] != '\\' { - // not an escape sequence - return b[offset], 1 - } - switch len(b) - offset { - case 1: // dangling escape - return 0, 0 - case 2, 3: // too short to be \ddd - default: // maybe \ddd - if isDigit(b[offset+1]) && isDigit(b[offset+2]) && isDigit(b[offset+3]) { - return dddToByte(b[offset+1:]), 4 - } - } - // not \ddd, maybe a control char - switch b[offset+1] { - case 't': - return '\t', 2 - case 'r': - return '\r', 2 - case 'n': - return '\n', 2 - default: - return b[offset+1], 2 - } -} - -type SPF struct { - Hdr RR_Header - Txt []string `dns:"txt"` -} - -func (rr *SPF) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) } - -type SRV struct { - Hdr RR_Header - Priority uint16 - Weight uint16 - Port uint16 - Target string `dns:"domain-name"` -} - -func (rr *SRV) String() string { - return rr.Hdr.String() + - strconv.Itoa(int(rr.Priority)) + " " + - strconv.Itoa(int(rr.Weight)) + " " + - strconv.Itoa(int(rr.Port)) + " " + sprintName(rr.Target) -} - -type NAPTR struct { - Hdr RR_Header - Order uint16 - Preference uint16 - Flags string - Service string - Regexp string - Replacement string `dns:"domain-name"` -} - -func (rr *NAPTR) String() string { - return rr.Hdr.String() + - strconv.Itoa(int(rr.Order)) + " " + - strconv.Itoa(int(rr.Preference)) + " " + - "\"" + rr.Flags + "\" " + - "\"" + rr.Service + "\" " + - "\"" + rr.Regexp + "\" " + - rr.Replacement -} - -// The CERT resource record, see RFC 4398. -type CERT struct { - Hdr RR_Header - Type uint16 - KeyTag uint16 - Algorithm uint8 - Certificate string `dns:"base64"` -} - -func (rr *CERT) String() string { - var ( - ok bool - certtype, algorithm string - ) - if certtype, ok = CertTypeToString[rr.Type]; !ok { - certtype = strconv.Itoa(int(rr.Type)) - } - if algorithm, ok = AlgorithmToString[rr.Algorithm]; !ok { - algorithm = strconv.Itoa(int(rr.Algorithm)) - } - return rr.Hdr.String() + certtype + - " " + strconv.Itoa(int(rr.KeyTag)) + - " " + algorithm + - " " + rr.Certificate -} - -// The DNAME resource record, see RFC 2672. 
-type DNAME struct { - Hdr RR_Header - Target string `dns:"domain-name"` -} - -func (rr *DNAME) String() string { - return rr.Hdr.String() + sprintName(rr.Target) -} - -type A struct { - Hdr RR_Header - A net.IP `dns:"a"` -} - -func (rr *A) String() string { - if rr.A == nil { - return rr.Hdr.String() - } - return rr.Hdr.String() + rr.A.String() -} - -type AAAA struct { - Hdr RR_Header - AAAA net.IP `dns:"aaaa"` -} - -func (rr *AAAA) String() string { - if rr.AAAA == nil { - return rr.Hdr.String() - } - return rr.Hdr.String() + rr.AAAA.String() -} - -type PX struct { - Hdr RR_Header - Preference uint16 - Map822 string `dns:"domain-name"` - Mapx400 string `dns:"domain-name"` -} - -func (rr *PX) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Map822) + " " + sprintName(rr.Mapx400) -} - -type GPOS struct { - Hdr RR_Header - Longitude string - Latitude string - Altitude string -} - -func (rr *GPOS) String() string { - return rr.Hdr.String() + rr.Longitude + " " + rr.Latitude + " " + rr.Altitude -} - -type LOC struct { - Hdr RR_Header - Version uint8 - Size uint8 - HorizPre uint8 - VertPre uint8 - Latitude uint32 - Longitude uint32 - Altitude uint32 -} - -// cmToM takes a cm value expressed in RFC1876 SIZE mantissa/exponent -// format and returns a string in m (two decimals for the cm) -func cmToM(m, e uint8) string { - if e < 2 { - if e == 1 { - m *= 10 - } - - return fmt.Sprintf("0.%02d", m) - } - - s := fmt.Sprintf("%d", m) - for e > 2 { - s += "0" - e-- - } - return s -} - -func (rr *LOC) String() string { - s := rr.Hdr.String() - - lat := rr.Latitude - ns := "N" - if lat > LOC_EQUATOR { - lat = lat - LOC_EQUATOR - } else { - ns = "S" - lat = LOC_EQUATOR - lat - } - h := lat / LOC_DEGREES - lat = lat % LOC_DEGREES - m := lat / LOC_HOURS - lat = lat % LOC_HOURS - s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, (float64(lat) / 1000), ns) - - lon := rr.Longitude - ew := "E" - if lon > LOC_PRIMEMERIDIAN { - lon = lon - LOC_PRIMEMERIDIAN - } else { - ew = "W" - lon = LOC_PRIMEMERIDIAN - lon - } - h = lon / LOC_DEGREES - lon = lon % LOC_DEGREES - m = lon / LOC_HOURS - lon = lon % LOC_HOURS - s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, (float64(lon) / 1000), ew) - - var alt = float64(rr.Altitude) / 100 - alt -= LOC_ALTITUDEBASE - if rr.Altitude%100 != 0 { - s += fmt.Sprintf("%.2fm ", alt) - } else { - s += fmt.Sprintf("%.0fm ", alt) - } - - s += cmToM((rr.Size&0xf0)>>4, rr.Size&0x0f) + "m " - s += cmToM((rr.HorizPre&0xf0)>>4, rr.HorizPre&0x0f) + "m " - s += cmToM((rr.VertPre&0xf0)>>4, rr.VertPre&0x0f) + "m" - - return s -} - -// SIG is identical to RRSIG and nowadays only used for SIG(0), RFC2931. 
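As a worked example of the RFC 1876 encoding that cmToM and LOC.String decode above: each size/precision byte packs a base-10 mantissa in the high nibble and a power-of-ten exponent (in centimeters) in the low nibble. A small stdlib-only sketch of the expansion:

    package main

    import "fmt"

    // sizeToCm expands an RFC 1876 mantissa/exponent byte to centimeters,
    // e.g. 0x12 -> 1 * 10^2 cm = 1 m (the default SIZE).
    func sizeToCm(b uint8) uint64 {
    	mantissa := uint64(b >> 4) // high nibble: 0..9
    	exponent := uint(b & 0x0f) // low nibble: 0..9
    	cm := mantissa
    	for i := uint(0); i < exponent; i++ {
    		cm *= 10
    	}
    	return cm
    }

    func main() {
    	fmt.Println(sizeToCm(0x12)) // 100 (1 m, the default SIZE)
    	fmt.Println(sizeToCm(0x16)) // 1000000 (10 km, the default HORIZ PRE)
    }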
-type SIG struct { - RRSIG -} - -type RRSIG struct { - Hdr RR_Header - TypeCovered uint16 - Algorithm uint8 - Labels uint8 - OrigTtl uint32 - Expiration uint32 - Inception uint32 - KeyTag uint16 - SignerName string `dns:"domain-name"` - Signature string `dns:"base64"` -} - -func (rr *RRSIG) String() string { - s := rr.Hdr.String() - s += Type(rr.TypeCovered).String() - s += " " + strconv.Itoa(int(rr.Algorithm)) + - " " + strconv.Itoa(int(rr.Labels)) + - " " + strconv.FormatInt(int64(rr.OrigTtl), 10) + - " " + TimeToString(rr.Expiration) + - " " + TimeToString(rr.Inception) + - " " + strconv.Itoa(int(rr.KeyTag)) + - " " + sprintName(rr.SignerName) + - " " + rr.Signature - return s -} - -type NSEC struct { - Hdr RR_Header - NextDomain string `dns:"domain-name"` - TypeBitMap []uint16 `dns:"nsec"` -} - -func (rr *NSEC) String() string { - s := rr.Hdr.String() + sprintName(rr.NextDomain) - for i := 0; i < len(rr.TypeBitMap); i++ { - s += " " + Type(rr.TypeBitMap[i]).String() - } - return s -} - -func (rr *NSEC) len() int { - l := rr.Hdr.len() + len(rr.NextDomain) + 1 - lastwindow := uint32(2 ^ 32 + 1) - for _, t := range rr.TypeBitMap { - window := t / 256 - if uint32(window) != lastwindow { - l += 1 + 32 - } - lastwindow = uint32(window) - } - return l -} - -type DLV struct { - DS -} - -type CDS struct { - DS -} - -type DS struct { - Hdr RR_Header - KeyTag uint16 - Algorithm uint8 - DigestType uint8 - Digest string `dns:"hex"` -} - -func (rr *DS) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.KeyTag)) + - " " + strconv.Itoa(int(rr.Algorithm)) + - " " + strconv.Itoa(int(rr.DigestType)) + - " " + strings.ToUpper(rr.Digest) -} - -type KX struct { - Hdr RR_Header - Preference uint16 - Exchanger string `dns:"domain-name"` -} - -func (rr *KX) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + - " " + sprintName(rr.Exchanger) -} - -type TA struct { - Hdr RR_Header - KeyTag uint16 - Algorithm uint8 - DigestType uint8 - Digest string `dns:"hex"` -} - -func (rr *TA) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.KeyTag)) + - " " + strconv.Itoa(int(rr.Algorithm)) + - " " + strconv.Itoa(int(rr.DigestType)) + - " " + strings.ToUpper(rr.Digest) -} - -type TALINK struct { - Hdr RR_Header - PreviousName string `dns:"domain-name"` - NextName string `dns:"domain-name"` -} - -func (rr *TALINK) String() string { - return rr.Hdr.String() + - sprintName(rr.PreviousName) + " " + sprintName(rr.NextName) -} - -type SSHFP struct { - Hdr RR_Header - Algorithm uint8 - Type uint8 - FingerPrint string `dns:"hex"` -} - -func (rr *SSHFP) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Algorithm)) + - " " + strconv.Itoa(int(rr.Type)) + - " " + strings.ToUpper(rr.FingerPrint) -} - -type KEY struct { - DNSKEY -} - -type CDNSKEY struct { - DNSKEY -} - -type DNSKEY struct { - Hdr RR_Header - Flags uint16 - Protocol uint8 - Algorithm uint8 - PublicKey string `dns:"base64"` -} - -func (rr *DNSKEY) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Flags)) + - " " + strconv.Itoa(int(rr.Protocol)) + - " " + strconv.Itoa(int(rr.Algorithm)) + - " " + rr.PublicKey -} - -type RKEY struct { - Hdr RR_Header - Flags uint16 - Protocol uint8 - Algorithm uint8 - PublicKey string `dns:"base64"` -} - -func (rr *RKEY) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Flags)) + - " " + strconv.Itoa(int(rr.Protocol)) + - " " + strconv.Itoa(int(rr.Algorithm)) + - " " + rr.PublicKey -} - -type NSAPPTR struct { - Hdr RR_Header - Ptr string 
`dns:"domain-name"` -} - -func (rr *NSAPPTR) String() string { return rr.Hdr.String() + sprintName(rr.Ptr) } - -type NSEC3 struct { - Hdr RR_Header - Hash uint8 - Flags uint8 - Iterations uint16 - SaltLength uint8 - Salt string `dns:"size-hex:SaltLength"` - HashLength uint8 - NextDomain string `dns:"size-base32:HashLength"` - TypeBitMap []uint16 `dns:"nsec"` -} - -func (rr *NSEC3) String() string { - s := rr.Hdr.String() - s += strconv.Itoa(int(rr.Hash)) + - " " + strconv.Itoa(int(rr.Flags)) + - " " + strconv.Itoa(int(rr.Iterations)) + - " " + saltToString(rr.Salt) + - " " + rr.NextDomain - for i := 0; i < len(rr.TypeBitMap); i++ { - s += " " + Type(rr.TypeBitMap[i]).String() - } - return s -} - -func (rr *NSEC3) len() int { - l := rr.Hdr.len() + 6 + len(rr.Salt)/2 + 1 + len(rr.NextDomain) + 1 - lastwindow := uint32(2 ^ 32 + 1) - for _, t := range rr.TypeBitMap { - window := t / 256 - if uint32(window) != lastwindow { - l += 1 + 32 - } - lastwindow = uint32(window) - } - return l -} - -type NSEC3PARAM struct { - Hdr RR_Header - Hash uint8 - Flags uint8 - Iterations uint16 - SaltLength uint8 - Salt string `dns:"size-hex:SaltLength"` -} - -func (rr *NSEC3PARAM) String() string { - s := rr.Hdr.String() - s += strconv.Itoa(int(rr.Hash)) + - " " + strconv.Itoa(int(rr.Flags)) + - " " + strconv.Itoa(int(rr.Iterations)) + - " " + saltToString(rr.Salt) - return s -} - -type TKEY struct { - Hdr RR_Header - Algorithm string `dns:"domain-name"` - Inception uint32 - Expiration uint32 - Mode uint16 - Error uint16 - KeySize uint16 - Key string - OtherLen uint16 - OtherData string -} - -func (rr *TKEY) String() string { - // It has no presentation format - return "" -} - -// RFC3597 represents an unknown/generic RR. -type RFC3597 struct { - Hdr RR_Header - Rdata string `dns:"hex"` -} - -func (rr *RFC3597) String() string { - // Let's call it a hack - s := rfc3597Header(rr.Hdr) - - s += "\\# " + strconv.Itoa(len(rr.Rdata)/2) + " " + rr.Rdata - return s -} - -func rfc3597Header(h RR_Header) string { - var s string - - s += sprintName(h.Name) + "\t" - s += strconv.FormatInt(int64(h.Ttl), 10) + "\t" - s += "CLASS" + strconv.Itoa(int(h.Class)) + "\t" - s += "TYPE" + strconv.Itoa(int(h.Rrtype)) + "\t" - return s -} - -type URI struct { - Hdr RR_Header - Priority uint16 - Weight uint16 - Target string `dns:"octet"` -} - -func (rr *URI) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Priority)) + - " " + strconv.Itoa(int(rr.Weight)) + " " + sprintTxtOctet(rr.Target) -} - -type DHCID struct { - Hdr RR_Header - Digest string `dns:"base64"` -} - -func (rr *DHCID) String() string { return rr.Hdr.String() + rr.Digest } - -type TLSA struct { - Hdr RR_Header - Usage uint8 - Selector uint8 - MatchingType uint8 - Certificate string `dns:"hex"` -} - -func (rr *TLSA) String() string { - return rr.Hdr.String() + - strconv.Itoa(int(rr.Usage)) + - " " + strconv.Itoa(int(rr.Selector)) + - " " + strconv.Itoa(int(rr.MatchingType)) + - " " + rr.Certificate -} - -type SMIMEA struct { - Hdr RR_Header - Usage uint8 - Selector uint8 - MatchingType uint8 - Certificate string `dns:"hex"` -} - -func (rr *SMIMEA) String() string { - s := rr.Hdr.String() + - strconv.Itoa(int(rr.Usage)) + - " " + strconv.Itoa(int(rr.Selector)) + - " " + strconv.Itoa(int(rr.MatchingType)) - - // Every Nth char needs a space on this output. If we output - // this as one giant line, we can't read it can in because in some cases - // the cert length overflows scan.maxTok (2048). 
- sx := splitN(rr.Certificate, 1024) // conservative value here - s += " " + strings.Join(sx, " ") - return s -} - -type HIP struct { - Hdr RR_Header - HitLength uint8 - PublicKeyAlgorithm uint8 - PublicKeyLength uint16 - Hit string `dns:"size-hex:HitLength"` - PublicKey string `dns:"size-base64:PublicKeyLength"` - RendezvousServers []string `dns:"domain-name"` -} - -func (rr *HIP) String() string { - s := rr.Hdr.String() + - strconv.Itoa(int(rr.PublicKeyAlgorithm)) + - " " + rr.Hit + - " " + rr.PublicKey - for _, d := range rr.RendezvousServers { - s += " " + sprintName(d) - } - return s -} - -type NINFO struct { - Hdr RR_Header - ZSData []string `dns:"txt"` -} - -func (rr *NINFO) String() string { return rr.Hdr.String() + sprintTxt(rr.ZSData) } - -type NID struct { - Hdr RR_Header - Preference uint16 - NodeID uint64 -} - -func (rr *NID) String() string { - s := rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) - node := fmt.Sprintf("%0.16x", rr.NodeID) - s += " " + node[0:4] + ":" + node[4:8] + ":" + node[8:12] + ":" + node[12:16] - return s -} - -type L32 struct { - Hdr RR_Header - Preference uint16 - Locator32 net.IP `dns:"a"` -} - -func (rr *L32) String() string { - if rr.Locator32 == nil { - return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) - } - return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + - " " + rr.Locator32.String() -} - -type L64 struct { - Hdr RR_Header - Preference uint16 - Locator64 uint64 -} - -func (rr *L64) String() string { - s := rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) - node := fmt.Sprintf("%0.16X", rr.Locator64) - s += " " + node[0:4] + ":" + node[4:8] + ":" + node[8:12] + ":" + node[12:16] - return s -} - -type LP struct { - Hdr RR_Header - Preference uint16 - Fqdn string `dns:"domain-name"` -} - -func (rr *LP) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Fqdn) -} - -type EUI48 struct { - Hdr RR_Header - Address uint64 `dns:"uint48"` -} - -func (rr *EUI48) String() string { return rr.Hdr.String() + euiToString(rr.Address, 48) } - -type EUI64 struct { - Hdr RR_Header - Address uint64 -} - -func (rr *EUI64) String() string { return rr.Hdr.String() + euiToString(rr.Address, 64) } - -type CAA struct { - Hdr RR_Header - Flag uint8 - Tag string - Value string `dns:"octet"` -} - -func (rr *CAA) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Flag)) + " " + rr.Tag + " " + sprintTxtOctet(rr.Value) -} - -type UID struct { - Hdr RR_Header - Uid uint32 -} - -func (rr *UID) String() string { return rr.Hdr.String() + strconv.FormatInt(int64(rr.Uid), 10) } - -type GID struct { - Hdr RR_Header - Gid uint32 -} - -func (rr *GID) String() string { return rr.Hdr.String() + strconv.FormatInt(int64(rr.Gid), 10) } - -type UINFO struct { - Hdr RR_Header - Uinfo string -} - -func (rr *UINFO) String() string { return rr.Hdr.String() + sprintTxt([]string{rr.Uinfo}) } - -type EID struct { - Hdr RR_Header - Endpoint string `dns:"hex"` -} - -func (rr *EID) String() string { return rr.Hdr.String() + strings.ToUpper(rr.Endpoint) } - -type NIMLOC struct { - Hdr RR_Header - Locator string `dns:"hex"` -} - -func (rr *NIMLOC) String() string { return rr.Hdr.String() + strings.ToUpper(rr.Locator) } - -type OPENPGPKEY struct { - Hdr RR_Header - PublicKey string `dns:"base64"` -} - -func (rr *OPENPGPKEY) String() string { return rr.Hdr.String() + rr.PublicKey } - -// TimeToString translates the RRSIG's incep. and expir. times to the -// string representation used when printing the record. 
-// It takes serial arithmetic (RFC 1982) into account.
-func TimeToString(t uint32) string {
-	mod := ((int64(t) - time.Now().Unix()) / year68) - 1
-	if mod < 0 {
-		mod = 0
-	}
-	ti := time.Unix(int64(t)-(mod*year68), 0).UTC()
-	return ti.Format("20060102150405")
-}
-
-// StringToTime translates the RRSIG's incep. and expir. times from
-// string values like "20110403154150" to a 32-bit integer.
-// It takes serial arithmetic (RFC 1982) into account.
-func StringToTime(s string) (uint32, error) {
-	t, err := time.Parse("20060102150405", s)
-	if err != nil {
-		return 0, err
-	}
-	mod := (t.Unix() / year68) - 1
-	if mod < 0 {
-		mod = 0
-	}
-	return uint32(t.Unix() - (mod * year68)), nil
-}
-
-// saltToString converts a NSECX salt to uppercase and returns "-" when it is empty.
-func saltToString(s string) string {
-	if len(s) == 0 {
-		return "-"
-	}
-	return strings.ToUpper(s)
-}
-
-func euiToString(eui uint64, bits int) (hex string) {
-	switch bits {
-	case 64:
-		hex = fmt.Sprintf("%16.16x", eui)
-		hex = hex[0:2] + "-" + hex[2:4] + "-" + hex[4:6] + "-" + hex[6:8] +
-			"-" + hex[8:10] + "-" + hex[10:12] + "-" + hex[12:14] + "-" + hex[14:16]
-	case 48:
-		hex = fmt.Sprintf("%12.12x", eui)
-		hex = hex[0:2] + "-" + hex[2:4] + "-" + hex[4:6] + "-" + hex[6:8] +
-			"-" + hex[8:10] + "-" + hex[10:12]
-	}
-	return
-}
-
-// copyIP returns a copy of ip.
-func copyIP(ip net.IP) net.IP {
-	p := make(net.IP, len(ip))
-	copy(p, ip)
-	return p
-}
-
-// splitN splits a string into N-sized string chunks.
-// This might become an exported function at some point.
-func splitN(s string, n int) []string {
-	if len(s) < n {
-		return []string{s}
-	}
-	sx := []string{}
-	p, i := 0, n
-	for {
-		if i <= len(s) {
-			sx = append(sx, s[p:i])
-		} else {
-			sx = append(sx, s[p:])
-			break
-		}
-		p, i = p+n, i+n
-	}
-
-	return sx
-}
diff --git a/vendor/github.com/miekg/dns/udp.go b/vendor/github.com/miekg/dns/udp.go
deleted file mode 100644
index c79c6c88..00000000
--- a/vendor/github.com/miekg/dns/udp.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// +build !windows,!plan9
-
-package dns
-
-import (
-	"net"
-	"syscall"
-)
-
-// SessionUDP holds the remote address and the associated
-// out-of-band data.
-type SessionUDP struct {
-	raddr   *net.UDPAddr
-	context []byte
-}
-
-// RemoteAddr returns the remote network address.
-func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }
-
-// setUDPSocketOptions sets the UDP socket options.
-// This function is implemented on a per-platform basis. See udp_*.go for more details.
-func setUDPSocketOptions(conn *net.UDPConn) error {
-	sa, err := getUDPSocketName(conn)
-	if err != nil {
-		return err
-	}
-	switch sa.(type) {
-	case *syscall.SockaddrInet6:
-		v6only, err := getUDPSocketOptions6Only(conn)
-		if err != nil {
-			return err
-		}
-		setUDPSocketOptions6(conn)
-		if !v6only {
-			setUDPSocketOptions4(conn)
-		}
-	case *syscall.SockaddrInet4:
-		setUDPSocketOptions4(conn)
-	}
-	return nil
-}
-
-// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
-// net.UDPAddr.
-func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
-	oob := make([]byte, 40)
-	n, oobn, _, raddr, err := conn.ReadMsgUDP(b, oob)
-	if err != nil {
-		return n, nil, err
-	}
-	return n, &SessionUDP{raddr, oob[:oobn]}, err
-}
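As a usage sketch for this session API, a hypothetical one-shot UDP echo handler built on ReadFromSessionUDP above and WriteToSessionUDP (shown just below); the listen address and buffer size are illustrative assumptions:

    package main

    import (
    	"log"
    	"net"

    	"github.com/miekg/dns"
    )

    func main() {
    	addr, err := net.ResolveUDPAddr("udp", "127.0.0.1:5353") // example address
    	if err != nil {
    		log.Fatal(err)
    	}
    	conn, err := net.ListenUDP("udp", addr)
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer conn.Close()

    	b := make([]byte, dns.MinMsgSize)
    	n, session, err := dns.ReadFromSessionUDP(conn, b) // keeps the OOB data for the reply
    	if err != nil {
    		log.Fatal(err)
    	}
    	// Reply through the same session so the source address matches the ingress interface.
    	if _, err := dns.WriteToSessionUDP(conn, b[:n], session); err != nil {
    		log.Fatal(err)
    	}
    }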
-// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr.
-func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) {
-	n, _, err := conn.WriteMsgUDP(b, session.context, session.raddr)
-	return n, err
-}
diff --git a/vendor/github.com/miekg/dns/udp_linux.go b/vendor/github.com/miekg/dns/udp_linux.go
deleted file mode 100644
index c62d2188..00000000
--- a/vendor/github.com/miekg/dns/udp_linux.go
+++ /dev/null
@@ -1,73 +0,0 @@
-// +build linux
-
-package dns
-
-// See:
-// * http://stackoverflow.com/questions/3062205/setting-the-source-ip-for-a-udp-socket and
-// * http://blog.powerdns.com/2012/10/08/on-binding-datagram-udp-sockets-to-the-any-addresses/
-//
-// Why do we need this: when listening on 0.0.0.0 with UDP, the kernel decides what the
-// outgoing interface will be, and this might not always be the correct one. This code
-// makes sure the egress packet's interface matches the ingress one.
-
-import (
-	"net"
-	"syscall"
-)
-
-// setUDPSocketOptions4 prepares the v4 socket for sessions.
-func setUDPSocketOptions4(conn *net.UDPConn) error {
-	file, err := conn.File()
-	if err != nil {
-		return err
-	}
-	if err := syscall.SetsockoptInt(int(file.Fd()), syscall.IPPROTO_IP, syscall.IP_PKTINFO, 1); err != nil {
-		return err
-	}
-	// Calling File() above results in the connection becoming blocking, we must fix that.
-	// See https://github.com/miekg/dns/issues/279
-	err = syscall.SetNonblock(int(file.Fd()), true)
-	if err != nil {
-		return err
-	}
-	return nil
-}
-
-// setUDPSocketOptions6 prepares the v6 socket for sessions.
-func setUDPSocketOptions6(conn *net.UDPConn) error {
-	file, err := conn.File()
-	if err != nil {
-		return err
-	}
-	if err := syscall.SetsockoptInt(int(file.Fd()), syscall.IPPROTO_IPV6, syscall.IPV6_RECVPKTINFO, 1); err != nil {
-		return err
-	}
-	err = syscall.SetNonblock(int(file.Fd()), true)
-	if err != nil {
-		return err
-	}
-	return nil
-}
-
-// getUDPSocketOptions6Only returns true if the socket is v6 only and false when it is v4/v6 combined
-// (dualstack).
-func getUDPSocketOptions6Only(conn *net.UDPConn) (bool, error) {
-	file, err := conn.File()
-	if err != nil {
-		return false, err
-	}
-	// dual stack. See http://stackoverflow.com/questions/1618240/how-to-support-both-ipv4-and-ipv6-connections
-	v6only, err := syscall.GetsockoptInt(int(file.Fd()), syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY)
-	if err != nil {
-		return false, err
-	}
-	return v6only == 1, nil
-}
-
-func getUDPSocketName(conn *net.UDPConn) (syscall.Sockaddr, error) {
-	file, err := conn.File()
-	if err != nil {
-		return nil, err
-	}
-	return syscall.Getsockname(int(file.Fd()))
-}
diff --git a/vendor/github.com/miekg/dns/udp_other.go b/vendor/github.com/miekg/dns/udp_other.go
deleted file mode 100644
index d4073244..00000000
--- a/vendor/github.com/miekg/dns/udp_other.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// +build !linux,!plan9
-
-package dns
-
-import (
-	"net"
-	"syscall"
-)
-
-// These do nothing. See udp_linux.go for an example of how to implement this.
-
-// We tried to adhere to some kind of naming scheme.
-
-func setUDPSocketOptions4(conn *net.UDPConn) error                 { return nil }
-func setUDPSocketOptions6(conn *net.UDPConn) error                 { return nil }
-func getUDPSocketOptions6Only(conn *net.UDPConn) (bool, error)     { return false, nil }
-func getUDPSocketName(conn *net.UDPConn) (syscall.Sockaddr, error) { return nil, nil }
diff --git a/vendor/github.com/miekg/dns/udp_plan9.go b/vendor/github.com/miekg/dns/udp_plan9.go
deleted file mode 100644
index b794deeb..00000000
--- a/vendor/github.com/miekg/dns/udp_plan9.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package dns
-
-import (
-	"net"
-)
-
-func setUDPSocketOptions(conn *net.UDPConn) error { return nil }
-
-// SessionUDP holds the remote address and the associated
-// out-of-band data.
-type SessionUDP struct {
-	raddr   *net.UDPAddr
-	context []byte
-}
-
-// RemoteAddr returns the remote network address.
-func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }
-
-// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
-// net.UDPAddr.
-func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
-	oob := make([]byte, 40)
-	n, oobn, _, raddr, err := conn.ReadMsgUDP(b, oob)
-	if err != nil {
-		return n, nil, err
-	}
-	return n, &SessionUDP{raddr, oob[:oobn]}, err
-}
-
-// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr.
-func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) {
-	n, _, err := conn.WriteMsgUDP(b, session.context, session.raddr)
-	return n, err
-}
diff --git a/vendor/github.com/miekg/dns/udp_windows.go b/vendor/github.com/miekg/dns/udp_windows.go
deleted file mode 100644
index 2ce4b330..00000000
--- a/vendor/github.com/miekg/dns/udp_windows.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// +build windows
-
-package dns
-
-import "net"
-
-type SessionUDP struct {
-	raddr *net.UDPAddr
-}
-
-// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
-// net.UDPAddr.
-func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
-	n, raddr, err := conn.ReadFrom(b)
-	if err != nil {
-		return n, nil, err
-	}
-	session := &SessionUDP{raddr.(*net.UDPAddr)}
-	return n, session, err
-}
-
-// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr.
-func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) {
-	n, err := conn.WriteTo(b, session.raddr)
-	return n, err
-}
-
-func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }
-
-// setUDPSocketOptions sets the UDP socket options.
-// This function is implemented on a per-platform basis. See udp_*.go for more details.
-func setUDPSocketOptions(conn *net.UDPConn) error {
-	return nil
-}
diff --git a/vendor/github.com/miekg/dns/update.go b/vendor/github.com/miekg/dns/update.go
deleted file mode 100644
index e90c5c96..00000000
--- a/vendor/github.com/miekg/dns/update.go
+++ /dev/null
@@ -1,106 +0,0 @@
-package dns
-
-// NameUsed sets the RRs in the prereq section to
-// "Name is in use" RRs. RFC 2136 section 2.4.4.
-func (u *Msg) NameUsed(rr []RR) {
-	if u.Answer == nil {
-		u.Answer = make([]RR, 0, len(rr))
-	}
-	for _, r := range rr {
-		u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}})
-	}
-}
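These helpers compose RFC 2136 dynamic update messages. A minimal sketch that builds (but does not send) an update adding an A record, guarded by a "name is in use" prerequisite via NameUsed above and Insert (defined just below); the zone, name, and address are illustrative:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/miekg/dns"
    )

    func main() {
    	m := new(dns.Msg)
    	m.SetUpdate("example.org.") // the zone goes in the question section

    	rr, err := dns.NewRR("host.example.org. 300 IN A 192.0.2.1")
    	if err != nil {
    		log.Fatal(err)
    	}
    	m.NameUsed([]dns.RR{rr}) // prerequisite: the name must exist
    	m.Insert([]dns.RR{rr})   // update: add the RRset

    	fmt.Println(m)
    }

Sending the message would then go through a normal client exchange, which this file does not cover.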
-// NameNotUsed sets the RRs in the prereq section to
-// "Name is not in use" RRs. RFC 2136 section 2.4.5.
-func (u *Msg) NameNotUsed(rr []RR) {
-	if u.Answer == nil {
-		u.Answer = make([]RR, 0, len(rr))
-	}
-	for _, r := range rr {
-		u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassNONE}})
-	}
-}
-
-// Used sets the RRs in the prereq section to
-// "RRset exists (value dependent -- with rdata)" RRs. RFC 2136 section 2.4.2.
-func (u *Msg) Used(rr []RR) {
-	if len(u.Question) == 0 {
-		panic("dns: empty question section")
-	}
-	if u.Answer == nil {
-		u.Answer = make([]RR, 0, len(rr))
-	}
-	for _, r := range rr {
-		r.Header().Class = u.Question[0].Qclass
-		u.Answer = append(u.Answer, r)
-	}
-}
-
-// RRsetUsed sets the RRs in the prereq section to
-// "RRset exists (value independent -- no rdata)" RRs. RFC 2136 section 2.4.1.
-func (u *Msg) RRsetUsed(rr []RR) {
-	if u.Answer == nil {
-		u.Answer = make([]RR, 0, len(rr))
-	}
-	for _, r := range rr {
-		u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassANY}})
-	}
-}
-
-// RRsetNotUsed sets the RRs in the prereq section to
-// "RRset does not exist" RRs. RFC 2136 section 2.4.3.
-func (u *Msg) RRsetNotUsed(rr []RR) {
-	if u.Answer == nil {
-		u.Answer = make([]RR, 0, len(rr))
-	}
-	for _, r := range rr {
-		u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassNONE}})
-	}
-}
-
-// Insert creates a dynamic update packet that adds a complete RRset, see RFC 2136 section 2.5.1.
-func (u *Msg) Insert(rr []RR) {
-	if len(u.Question) == 0 {
-		panic("dns: empty question section")
-	}
-	if u.Ns == nil {
-		u.Ns = make([]RR, 0, len(rr))
-	}
-	for _, r := range rr {
-		r.Header().Class = u.Question[0].Qclass
-		u.Ns = append(u.Ns, r)
-	}
-}
-
-// RemoveRRset creates a dynamic update packet that deletes an RRset, see RFC 2136 section 2.5.2.
-func (u *Msg) RemoveRRset(rr []RR) {
-	if u.Ns == nil {
-		u.Ns = make([]RR, 0, len(rr))
-	}
-	for _, r := range rr {
-		u.Ns = append(u.Ns, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassANY}})
-	}
-}
-
-// RemoveName creates a dynamic update packet that deletes all RRsets of a name, see RFC 2136 section 2.5.3.
-func (u *Msg) RemoveName(rr []RR) {
-	if u.Ns == nil {
-		u.Ns = make([]RR, 0, len(rr))
-	}
-	for _, r := range rr {
-		u.Ns = append(u.Ns, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}})
-	}
-}
-
-// Remove creates a dynamic update packet that deletes RRs from an RRset, see RFC 2136 section 2.5.4.
-func (u *Msg) Remove(rr []RR) {
-	if u.Ns == nil {
-		u.Ns = make([]RR, 0, len(rr))
-	}
-	for _, r := range rr {
-		r.Header().Class = ClassNONE
-		r.Header().Ttl = 0
-		u.Ns = append(u.Ns, r)
-	}
-}
diff --git a/vendor/github.com/miekg/dns/xfr.go b/vendor/github.com/miekg/dns/xfr.go
deleted file mode 100644
index 7346deff..00000000
--- a/vendor/github.com/miekg/dns/xfr.go
+++ /dev/null
@@ -1,244 +0,0 @@
-package dns
-
-import (
-	"time"
-)
-
-// Envelope is used when doing a zone transfer with a remote server.
-type Envelope struct {
-	RR    []RR  // The set of RRs in the answer section of the xfr reply message.
-	Error error // If something went wrong, this contains the error.
-}
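A minimal sketch of consuming these envelopes with the Transfer type defined next; the zone and server address are illustrative:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/miekg/dns"
    )

    func main() {
    	m := new(dns.Msg)
    	m.SetAxfr("example.org.") // question: AXFR for the zone

    	tr := new(dns.Transfer)
    	ch, err := tr.In(m, "ns1.example.org:53")
    	if err != nil {
    		log.Fatal(err)
    	}
    	for env := range ch { // one Envelope per reply message
    		if env.Error != nil {
    			log.Fatal(env.Error)
    		}
    		for _, rr := range env.RR {
    			fmt.Println(rr)
    		}
    	}
    }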
-// A Transfer defines parameters that are used during a zone transfer.
-type Transfer struct {
-	*Conn
-	DialTimeout    time.Duration     // net.DialTimeout, defaults to 2 seconds
-	ReadTimeout    time.Duration     // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds
-	WriteTimeout   time.Duration     // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds
-	TsigSecret     map[string]string // Secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be fully qualified
-	tsigTimersOnly bool
-}
-
-// Think we need a way to stop the transfer
-
-// In performs an incoming transfer with the server at address a.
-// If you would like to set the source IP, or some other attribute
-// of a Dialer for a Transfer, you can do so by specifying the attributes
-// in the Transfer.Conn:
-//
-//	d := net.Dialer{LocalAddr: transfer_source}
-//	con, err := d.Dial("tcp", master)
-//	dnscon := &dns.Conn{Conn:con}
-//	transfer = &dns.Transfer{Conn: dnscon}
-//	channel, err := transfer.In(message, master)
-//
-func (t *Transfer) In(q *Msg, a string) (env chan *Envelope, err error) {
-	timeout := dnsTimeout
-	if t.DialTimeout != 0 {
-		timeout = t.DialTimeout
-	}
-	if t.Conn == nil {
-		t.Conn, err = DialTimeout("tcp", a, timeout)
-		if err != nil {
-			return nil, err
-		}
-	}
-	if err := t.WriteMsg(q); err != nil {
-		return nil, err
-	}
-	env = make(chan *Envelope)
-	go func() {
-		if q.Question[0].Qtype == TypeAXFR {
-			go t.inAxfr(q.Id, env)
-			return
-		}
-		if q.Question[0].Qtype == TypeIXFR {
-			go t.inIxfr(q.Id, env)
-			return
-		}
-	}()
-	return env, nil
-}
-
-func (t *Transfer) inAxfr(id uint16, c chan *Envelope) {
-	first := true
-	defer t.Close()
-	defer close(c)
-	timeout := dnsTimeout
-	if t.ReadTimeout != 0 {
-		timeout = t.ReadTimeout
-	}
-	for {
-		t.Conn.SetReadDeadline(time.Now().Add(timeout))
-		in, err := t.ReadMsg()
-		if err != nil {
-			c <- &Envelope{nil, err}
-			return
-		}
-		if id != in.Id {
-			c <- &Envelope{in.Answer, ErrId}
-			return
-		}
-		if first {
-			if !isSOAFirst(in) {
-				c <- &Envelope{in.Answer, ErrSoa}
-				return
-			}
-			first = !first
-			// only one answer that is SOA, receive more
-			if len(in.Answer) == 1 {
-				t.tsigTimersOnly = true
-				c <- &Envelope{in.Answer, nil}
-				continue
-			}
-		}
-
-		if !first {
-			t.tsigTimersOnly = true // Subsequent envelopes use this.
- if isSOALast(in) { - c <- &Envelope{in.Answer, nil} - return - } - c <- &Envelope{in.Answer, nil} - } - } -} - -func (t *Transfer) inIxfr(id uint16, c chan *Envelope) { - serial := uint32(0) // The first serial seen is the current server serial - first := true - defer t.Close() - defer close(c) - timeout := dnsTimeout - if t.ReadTimeout != 0 { - timeout = t.ReadTimeout - } - for { - t.SetReadDeadline(time.Now().Add(timeout)) - in, err := t.ReadMsg() - if err != nil { - c <- &Envelope{nil, err} - return - } - if id != in.Id { - c <- &Envelope{in.Answer, ErrId} - return - } - if first { - // A single SOA RR signals "no changes" - if len(in.Answer) == 1 && isSOAFirst(in) { - c <- &Envelope{in.Answer, nil} - return - } - - // Check if the returned answer is ok - if !isSOAFirst(in) { - c <- &Envelope{in.Answer, ErrSoa} - return - } - // This serial is important - serial = in.Answer[0].(*SOA).Serial - first = !first - } - - // Now we need to check each message for SOA records, to see what we need to do - if !first { - t.tsigTimersOnly = true - // If the last record in the IXFR contains the servers' SOA, we should quit - if v, ok := in.Answer[len(in.Answer)-1].(*SOA); ok { - if v.Serial == serial { - c <- &Envelope{in.Answer, nil} - return - } - } - c <- &Envelope{in.Answer, nil} - } - } -} - -// Out performs an outgoing transfer with the client connecting in w. -// Basic use pattern: -// -// ch := make(chan *dns.Envelope) -// tr := new(dns.Transfer) -// go tr.Out(w, r, ch) -// ch <- &dns.Envelope{RR: []dns.RR{soa, rr1, rr2, rr3, soa}} -// close(ch) -// w.Hijack() -// // w.Close() // Client closes connection -// -// The server is responsible for sending the correct sequence of RRs through the -// channel ch. -func (t *Transfer) Out(w ResponseWriter, q *Msg, ch chan *Envelope) error { - for x := range ch { - r := new(Msg) - // Compress? - r.SetReply(q) - r.Authoritative = true - // assume it fits TODO(miek): fix - r.Answer = append(r.Answer, x.RR...) - if err := w.WriteMsg(r); err != nil { - return err - } - } - w.TsigTimersOnly(true) - return nil -} - -// ReadMsg reads a message from the transfer connection t. -func (t *Transfer) ReadMsg() (*Msg, error) { - m := new(Msg) - p := make([]byte, MaxMsgSize) - n, err := t.Read(p) - if err != nil && n == 0 { - return nil, err - } - p = p[:n] - if err := m.Unpack(p); err != nil { - return nil, err - } - if ts := m.IsTsig(); ts != nil && t.TsigSecret != nil { - if _, ok := t.TsigSecret[ts.Hdr.Name]; !ok { - return m, ErrSecret - } - // Need to work on the original message p, as that was used to calculate the tsig. - err = TsigVerify(p, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly) - t.tsigRequestMAC = ts.MAC - } - return m, err -} - -// WriteMsg writes a message through the transfer connection t. 
-func (t *Transfer) WriteMsg(m *Msg) (err error) { - var out []byte - if ts := m.IsTsig(); ts != nil && t.TsigSecret != nil { - if _, ok := t.TsigSecret[ts.Hdr.Name]; !ok { - return ErrSecret - } - out, t.tsigRequestMAC, err = TsigGenerate(m, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly) - } else { - out, err = m.Pack() - } - if err != nil { - return err - } - if _, err = t.Write(out); err != nil { - return err - } - return nil -} - -func isSOAFirst(in *Msg) bool { - if len(in.Answer) > 0 { - return in.Answer[0].Header().Rrtype == TypeSOA - } - return false -} - -func isSOALast(in *Msg) bool { - if len(in.Answer) > 0 { - return in.Answer[len(in.Answer)-1].Header().Rrtype == TypeSOA - } - return false -} diff --git a/vendor/github.com/miekg/dns/zmsg.go b/vendor/github.com/miekg/dns/zmsg.go deleted file mode 100644 index c561370e..00000000 --- a/vendor/github.com/miekg/dns/zmsg.go +++ /dev/null @@ -1,3529 +0,0 @@ -// *** DO NOT MODIFY *** -// AUTOGENERATED BY go generate from msg_generate.go - -package dns - -// pack*() functions - -func (rr *A) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packDataA(rr.A, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *AAAA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packDataAAAA(rr.AAAA, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *AFSDB) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint16(rr.Subtype, msg, off) - if err != nil { - return off, err - } - off, err = PackDomainName(rr.Hostname, msg, off, compression, compress) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *ANY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *CAA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint8(rr.Flag, msg, off) - if err != nil { - return off, err - } - off, err = packString(rr.Tag, msg, off) - if err != nil { - return off, err - } - off, err = packStringOctet(rr.Value, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *CDNSKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint16(rr.Flags, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Protocol, msg, off) - if err != nil { - return off, err - } - off, err = 
packUint8(rr.Algorithm, msg, off) - if err != nil { - return off, err - } - off, err = packStringBase64(rr.PublicKey, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *CDS) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint16(rr.KeyTag, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Algorithm, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.DigestType, msg, off) - if err != nil { - return off, err - } - off, err = packStringHex(rr.Digest, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *CERT) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint16(rr.Type, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(rr.KeyTag, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Algorithm, msg, off) - if err != nil { - return off, err - } - off, err = packStringBase64(rr.Certificate, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *CNAME) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = PackDomainName(rr.Target, msg, off, compression, compress) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *DHCID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packStringBase64(rr.Digest, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *DLV) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint16(rr.KeyTag, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Algorithm, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.DigestType, msg, off) - if err != nil { - return off, err - } - off, err = packStringHex(rr.Digest, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *DNAME) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = PackDomainName(rr.Target, msg, off, compression, compress) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *DNSKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - 
headerEnd := off - off, err = packUint16(rr.Flags, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Protocol, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Algorithm, msg, off) - if err != nil { - return off, err - } - off, err = packStringBase64(rr.PublicKey, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *DS) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint16(rr.KeyTag, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Algorithm, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.DigestType, msg, off) - if err != nil { - return off, err - } - off, err = packStringHex(rr.Digest, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *EID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packStringHex(rr.Endpoint, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *EUI48) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint48(rr.Address, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *EUI64) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint64(rr.Address, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *GID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint32(rr.Gid, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *GPOS) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packString(rr.Longitude, msg, off) - if err != nil { - return off, err - } - off, err = packString(rr.Latitude, msg, off) - if err != nil { - return off, err - } - off, err = packString(rr.Altitude, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *HINFO) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packString(rr.Cpu, msg, off) - if err != nil { - return off, err - } - off, err = packString(rr.Os, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = 
uint16(off - headerEnd) - return off, nil -} - -func (rr *HIP) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint8(rr.HitLength, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.PublicKeyAlgorithm, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(rr.PublicKeyLength, msg, off) - if err != nil { - return off, err - } - off, err = packStringHex(rr.Hit, msg, off) - if err != nil { - return off, err - } - off, err = packStringBase64(rr.PublicKey, msg, off) - if err != nil { - return off, err - } - off, err = packDataDomainNames(rr.RendezvousServers, msg, off, compression, compress) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *KEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint16(rr.Flags, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Protocol, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Algorithm, msg, off) - if err != nil { - return off, err - } - off, err = packStringBase64(rr.PublicKey, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *KX) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint16(rr.Preference, msg, off) - if err != nil { - return off, err - } - off, err = PackDomainName(rr.Exchanger, msg, off, compression, compress) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *L32) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint16(rr.Preference, msg, off) - if err != nil { - return off, err - } - off, err = packDataA(rr.Locator32, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *L64) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint16(rr.Preference, msg, off) - if err != nil { - return off, err - } - off, err = packUint64(rr.Locator64, msg, off) - if err != nil { - return off, err - } - rr.Header().Rdlength = uint16(off - headerEnd) - return off, nil -} - -func (rr *LOC) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { - off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, err - } - headerEnd := off - off, err = packUint8(rr.Version, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Size, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.HorizPre, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.VertPre, msg, off) - if err != nil { - return off, err - } - off, err 
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint32(rr.Longitude, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint32(rr.Altitude, msg, off)
-	if err != nil {
-		return off, err
-	}
-	rr.Header().Rdlength = uint16(off - headerEnd)
-	return off, nil
-}
-
-func (rr *LP) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
-	off, err := rr.Hdr.pack(msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	headerEnd := off
-	off, err = packUint16(rr.Preference, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = PackDomainName(rr.Fqdn, msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	rr.Header().Rdlength = uint16(off - headerEnd)
-	return off, nil
-}
-
-func (rr *MB) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
-	off, err := rr.Hdr.pack(msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	headerEnd := off
-	off, err = PackDomainName(rr.Mb, msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	rr.Header().Rdlength = uint16(off - headerEnd)
-	return off, nil
-}
-
-func (rr *MD) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
-	off, err := rr.Hdr.pack(msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	headerEnd := off
-	off, err = PackDomainName(rr.Md, msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	rr.Header().Rdlength = uint16(off - headerEnd)
-	return off, nil
-}
-
-func (rr *MF) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
-	off, err := rr.Hdr.pack(msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	headerEnd := off
-	off, err = PackDomainName(rr.Mf, msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	rr.Header().Rdlength = uint16(off - headerEnd)
-	return off, nil
-}
-
-func (rr *MG) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
-	off, err := rr.Hdr.pack(msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	headerEnd := off
-	off, err = PackDomainName(rr.Mg, msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	rr.Header().Rdlength = uint16(off - headerEnd)
-	return off, nil
-}
-
-func (rr *MINFO) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
-	off, err := rr.Hdr.pack(msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	headerEnd := off
-	off, err = PackDomainName(rr.Rmail, msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	off, err = PackDomainName(rr.Email, msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	rr.Header().Rdlength = uint16(off - headerEnd)
-	return off, nil
-}
-
-func (rr *MR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
-	off, err := rr.Hdr.pack(msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	headerEnd := off
-	off, err = PackDomainName(rr.Mr, msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	rr.Header().Rdlength = uint16(off - headerEnd)
-	return off, nil
-}
-
-func (rr *MX) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
-	off, err := rr.Hdr.pack(msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	headerEnd := off
-	off, err = packUint16(rr.Preference, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = PackDomainName(rr.Mx, msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	rr.Header().Rdlength = uint16(off - headerEnd)
-	return off, nil
-}
-
-func (rr *NAPTR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
-	off, err := rr.Hdr.pack(msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	headerEnd := off
-	off, err = packUint16(rr.Order, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint16(rr.Preference, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packString(rr.Flags, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packString(rr.Service, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packString(rr.Regexp, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = PackDomainName(rr.Replacement, msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	rr.Header().Rdlength = uint16(off - headerEnd)
-	return off, nil
-}
-
-func (rr *NID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
-	off, err := rr.Hdr.pack(msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	headerEnd := off
-	off, err = packUint16(rr.Preference, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint64(rr.NodeID, msg, off)
-	if err != nil {
-		return off, err
-	}
-	rr.Header().Rdlength = uint16(off - headerEnd)
-	return off, nil
-}
-
-func (rr *NIMLOC) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
-	off, err := rr.Hdr.pack(msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	headerEnd := off
-	off, err = packStringHex(rr.Locator, msg, off)
-	if err != nil {
-		return off, err
-	}
-	rr.Header().Rdlength = uint16(off - headerEnd)
-	return off, nil
-}
-
-func (rr *NINFO) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
-	off, err := rr.Hdr.pack(msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	headerEnd := off
-	off, err = packStringTxt(rr.ZSData, msg, off)
-	if err != nil {
-		return off, err
-	}
-	rr.Header().Rdlength = uint16(off - headerEnd)
-	return off, nil
-}
-
-func (rr *NS) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
-	off, err := rr.Hdr.pack(msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	headerEnd := off
-	off, err = PackDomainName(rr.Ns, msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	rr.Header().Rdlength = uint16(off - headerEnd)
-	return off, nil
-}
-
-func (rr *NSAPPTR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
-	off, err := rr.Hdr.pack(msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	headerEnd := off
-	off, err = PackDomainName(rr.Ptr, msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	rr.Header().Rdlength = uint16(off - headerEnd)
-	return off, nil
-}
-
-func (rr *NSEC) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
-	off, err := rr.Hdr.pack(msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	headerEnd := off
-	off, err = PackDomainName(rr.NextDomain, msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	off, err = packDataNsec(rr.TypeBitMap, msg, off)
-	if err != nil {
-		return off, err
-	}
-	rr.Header().Rdlength = uint16(off - headerEnd)
-	return off, nil
-}
-
-func (rr *NSEC3) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
-	off, err := rr.Hdr.pack(msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	headerEnd := off
-	off, err = packUint8(rr.Hash, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint8(rr.Flags, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint16(rr.Iterations, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint8(rr.SaltLength, msg, off)
-	if err != nil {
-		return off, err
-	}
-	if rr.Salt == "-" { /* do nothing, empty salt */
-	}
-	off, err = packStringHex(rr.Salt, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint8(rr.HashLength, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packStringBase32(rr.NextDomain, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packDataNsec(rr.TypeBitMap, msg, off)
-	if err != nil {
-		return off, err
-	}
-	rr.Header().Rdlength = uint16(off - headerEnd)
-	return off, nil
-}
-
-func (rr *NSEC3PARAM) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
-	off, err := rr.Hdr.pack(msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	headerEnd := off
-	off, err = packUint8(rr.Hash, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint8(rr.Flags, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint16(rr.Iterations, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint8(rr.SaltLength, msg, off)
-	if err != nil {
-		return off, err
-	}
-	if rr.Salt == "-" { /* do nothing, empty salt */
-	}
-	off, err = packStringHex(rr.Salt, msg, off)
-	if err != nil {
-		return off, err
-	}
-	rr.Header().Rdlength = uint16(off - headerEnd)
-	return off, nil
-}
-
-func (rr *OPENPGPKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
-	off, err := rr.Hdr.pack(msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	headerEnd := off
-	off, err = packStringBase64(rr.PublicKey, msg, off)
-	if err != nil {
-		return off, err
-	}
-	rr.Header().Rdlength = uint16(off - headerEnd)
-	return off, nil
-}
-
-func (rr *OPT) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
-	off, err := rr.Hdr.pack(msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	headerEnd := off
-	off, err = packDataOpt(rr.Option, msg, off)
-	if err != nil {
-		return off, err
-	}
-	rr.Header().Rdlength = uint16(off - headerEnd)
-	return off, nil
-}
-
-func (rr *PTR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
-	off, err := rr.Hdr.pack(msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	headerEnd := off
-	off, err = PackDomainName(rr.Ptr, msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	rr.Header().Rdlength = uint16(off - headerEnd)
-	return off, nil
-}
-
-func (rr *PX) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
-	off, err := rr.Hdr.pack(msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	headerEnd := off
-	off, err = packUint16(rr.Preference, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = PackDomainName(rr.Map822, msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	off, err = PackDomainName(rr.Mapx400, msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	rr.Header().Rdlength = uint16(off - headerEnd)
-	return off, nil
-}
-
-func (rr *RFC3597) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
-	off, err := rr.Hdr.pack(msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	headerEnd := off
-	off, err = packStringHex(rr.Rdata, msg, off)
-	if err != nil {
-		return off, err
-	}
-	rr.Header().Rdlength = uint16(off - headerEnd)
-	return off, nil
-}
-
-func (rr *RKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
-	off, err := rr.Hdr.pack(msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	headerEnd := off
-	off, err = packUint16(rr.Flags, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint8(rr.Protocol, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint8(rr.Algorithm, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packStringBase64(rr.PublicKey, msg, off)
-	if err != nil {
-		return off, err
-	}
-	rr.Header().Rdlength = uint16(off - headerEnd)
-	return off, nil
-}
-
-func (rr *RP) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
-	off, err := rr.Hdr.pack(msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	headerEnd := off
-	off, err = PackDomainName(rr.Mbox, msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	off, err = PackDomainName(rr.Txt, msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	rr.Header().Rdlength = uint16(off - headerEnd)
-	return off, nil
-}
-
-func (rr *RRSIG) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
-	off, err := rr.Hdr.pack(msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	headerEnd := off
-	off, err = packUint16(rr.TypeCovered, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint8(rr.Algorithm, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint8(rr.Labels, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint32(rr.OrigTtl, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint32(rr.Expiration, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint32(rr.Inception, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint16(rr.KeyTag, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = PackDomainName(rr.SignerName, msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	off, err = packStringBase64(rr.Signature, msg, off)
-	if err != nil {
-		return off, err
-	}
-	rr.Header().Rdlength = uint16(off - headerEnd)
-	return off, nil
-}
-
-func (rr *RT) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
-	off, err := rr.Hdr.pack(msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	headerEnd := off
-	off, err = packUint16(rr.Preference, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = PackDomainName(rr.Host, msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	rr.Header().Rdlength = uint16(off - headerEnd)
-	return off, nil
-}
-
-func (rr *SIG) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
-	off, err := rr.Hdr.pack(msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	headerEnd := off
-	off, err = packUint16(rr.TypeCovered, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint8(rr.Algorithm, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint8(rr.Labels, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint32(rr.OrigTtl, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint32(rr.Expiration, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint32(rr.Inception, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint16(rr.KeyTag, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = PackDomainName(rr.SignerName, msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	off, err = packStringBase64(rr.Signature, msg, off)
-	if err != nil {
-		return off, err
-	}
-	rr.Header().Rdlength = uint16(off - headerEnd)
-	return off, nil
-}
-
-func (rr *SMIMEA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
-	off, err := rr.Hdr.pack(msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	headerEnd := off
-	off, err = packUint8(rr.Usage, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint8(rr.Selector, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint8(rr.MatchingType, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packStringHex(rr.Certificate, msg, off)
-	if err != nil {
-		return off, err
-	}
-	rr.Header().Rdlength = uint16(off - headerEnd)
-	return off, nil
-}
-
-func (rr *SOA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
-	off, err := rr.Hdr.pack(msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	headerEnd := off
-	off, err = PackDomainName(rr.Ns, msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	off, err = PackDomainName(rr.Mbox, msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint32(rr.Serial, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint32(rr.Refresh, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint32(rr.Retry, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint32(rr.Expire, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint32(rr.Minttl, msg, off)
-	if err != nil {
-		return off, err
-	}
-	rr.Header().Rdlength = uint16(off - headerEnd)
-	return off, nil
-}
-
-func (rr *SPF) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
-	off, err := rr.Hdr.pack(msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	headerEnd := off
-	off, err = packStringTxt(rr.Txt, msg, off)
-	if err != nil {
-		return off, err
-	}
-	rr.Header().Rdlength = uint16(off - headerEnd)
-	return off, nil
-}
-
-func (rr *SRV) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
-	off, err := rr.Hdr.pack(msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	headerEnd := off
-	off, err = packUint16(rr.Priority, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint16(rr.Weight, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint16(rr.Port, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = PackDomainName(rr.Target, msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	rr.Header().Rdlength = uint16(off - headerEnd)
-	return off, nil
-}
-
-func (rr *SSHFP) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
-	off, err := rr.Hdr.pack(msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	headerEnd := off
-	off, err = packUint8(rr.Algorithm, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint8(rr.Type, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packStringHex(rr.FingerPrint, msg, off)
-	if err != nil {
-		return off, err
-	}
-	rr.Header().Rdlength = uint16(off - headerEnd)
-	return off, nil
-}
-
-func (rr *TA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
-	off, err := rr.Hdr.pack(msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	headerEnd := off
-	off, err = packUint16(rr.KeyTag, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint8(rr.Algorithm, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint8(rr.DigestType, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packStringHex(rr.Digest, msg, off)
-	if err != nil {
-		return off, err
-	}
-	rr.Header().Rdlength = uint16(off - headerEnd)
-	return off, nil
-}
-
-func (rr *TALINK) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
-	off, err := rr.Hdr.pack(msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	headerEnd := off
-	off, err = PackDomainName(rr.PreviousName, msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	off, err = PackDomainName(rr.NextName, msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	rr.Header().Rdlength = uint16(off - headerEnd)
-	return off, nil
-}
-
-func (rr *TKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
-	off, err := rr.Hdr.pack(msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	headerEnd := off
-	off, err = PackDomainName(rr.Algorithm, msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint32(rr.Inception, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint32(rr.Expiration, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint16(rr.Mode, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint16(rr.Error, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint16(rr.KeySize, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packString(rr.Key, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint16(rr.OtherLen, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packString(rr.OtherData, msg, off)
-	if err != nil {
-		return off, err
-	}
-	rr.Header().Rdlength = uint16(off - headerEnd)
-	return off, nil
-}
-
-func (rr *TLSA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
-	off, err := rr.Hdr.pack(msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	headerEnd := off
-	off, err = packUint8(rr.Usage, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint8(rr.Selector, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint8(rr.MatchingType, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packStringHex(rr.Certificate, msg, off)
-	if err != nil {
-		return off, err
-	}
-	rr.Header().Rdlength = uint16(off - headerEnd)
-	return off, nil
-}
-
-func (rr *TSIG) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
-	off, err := rr.Hdr.pack(msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	headerEnd := off
-	off, err = PackDomainName(rr.Algorithm, msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint48(rr.TimeSigned, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint16(rr.Fudge, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint16(rr.MACSize, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packStringHex(rr.MAC, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint16(rr.OrigId, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint16(rr.Error, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint16(rr.OtherLen, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packStringHex(rr.OtherData, msg, off)
-	if err != nil {
-		return off, err
-	}
-	rr.Header().Rdlength = uint16(off - headerEnd)
-	return off, nil
-}
-
-func (rr *TXT) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
-	off, err := rr.Hdr.pack(msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	headerEnd := off
-	off, err = packStringTxt(rr.Txt, msg, off)
-	if err != nil {
-		return off, err
-	}
-	rr.Header().Rdlength = uint16(off - headerEnd)
-	return off, nil
-}
-
-func (rr *UID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
-	off, err := rr.Hdr.pack(msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	headerEnd := off
-	off, err = packUint32(rr.Uid, msg, off)
-	if err != nil {
-		return off, err
-	}
-	rr.Header().Rdlength = uint16(off - headerEnd)
-	return off, nil
-}
-
-func (rr *UINFO) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
-	off, err := rr.Hdr.pack(msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	headerEnd := off
-	off, err = packString(rr.Uinfo, msg, off)
-	if err != nil {
-		return off, err
-	}
-	rr.Header().Rdlength = uint16(off - headerEnd)
-	return off, nil
-}
-
-func (rr *URI) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
-	off, err := rr.Hdr.pack(msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	headerEnd := off
-	off, err = packUint16(rr.Priority, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packUint16(rr.Weight, msg, off)
-	if err != nil {
-		return off, err
-	}
-	off, err = packStringOctet(rr.Target, msg, off)
-	if err != nil {
-		return off, err
-	}
-	rr.Header().Rdlength = uint16(off - headerEnd)
-	return off, nil
-}
-
-func (rr *X25) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
-	off, err := rr.Hdr.pack(msg, off, compression, compress)
-	if err != nil {
-		return off, err
-	}
-	headerEnd := off
-	off, err = packString(rr.PSDNAddress, msg, off)
-	if err != nil {
-		return off, err
-	}
-	rr.Header().Rdlength = uint16(off - headerEnd)
-	return off, nil
-}
-
-// unpack*() functions
-
-func unpackA(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(A)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.A, off, err = unpackDataA(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackAAAA(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(AAAA)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.AAAA, off, err = unpackDataAAAA(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackAFSDB(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(AFSDB)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Subtype, off, err = unpackUint16(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Hostname, off, err = UnpackDomainName(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackANY(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(ANY)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	return rr, off, err
-}
-
-func unpackCAA(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(CAA)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Flag, off, err = unpackUint8(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Tag, off, err = unpackString(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Value, off, err = unpackStringOctet(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackCDNSKEY(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(CDNSKEY)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Flags, off, err = unpackUint16(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Protocol, off, err = unpackUint8(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Algorithm, off, err = unpackUint8(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackCDS(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(CDS)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.KeyTag, off, err = unpackUint16(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Algorithm, off, err = unpackUint8(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.DigestType, off, err = unpackUint8(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackCERT(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(CERT)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Type, off, err = unpackUint16(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.KeyTag, off, err = unpackUint16(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Algorithm, off, err = unpackUint8(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Certificate, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackCNAME(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(CNAME)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Target, off, err = UnpackDomainName(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackDHCID(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(DHCID)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Digest, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackDLV(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(DLV)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.KeyTag, off, err = unpackUint16(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Algorithm, off, err = unpackUint8(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.DigestType, off, err = unpackUint8(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackDNAME(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(DNAME)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Target, off, err = UnpackDomainName(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackDNSKEY(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(DNSKEY)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Flags, off, err = unpackUint16(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Protocol, off, err = unpackUint8(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Algorithm, off, err = unpackUint8(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackDS(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(DS)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.KeyTag, off, err = unpackUint16(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Algorithm, off, err = unpackUint8(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.DigestType, off, err = unpackUint8(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackEID(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(EID)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Endpoint, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackEUI48(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(EUI48)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Address, off, err = unpackUint48(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackEUI64(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(EUI64)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Address, off, err = unpackUint64(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackGID(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(GID)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Gid, off, err = unpackUint32(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackGPOS(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(GPOS)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Longitude, off, err = unpackString(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Latitude, off, err = unpackString(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Altitude, off, err = unpackString(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackHINFO(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(HINFO)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Cpu, off, err = unpackString(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Os, off, err = unpackString(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackHIP(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(HIP)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.HitLength, off, err = unpackUint8(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.PublicKeyAlgorithm, off, err = unpackUint8(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.PublicKeyLength, off, err = unpackUint16(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Hit, off, err = unpackStringHex(msg, off, off+int(rr.HitLength))
-	if err != nil {
-		return rr, off, err
-	}
-	rr.PublicKey, off, err = unpackStringBase64(msg, off, off+int(rr.PublicKeyLength))
-	if err != nil {
-		return rr, off, err
-	}
-	rr.RendezvousServers, off, err = unpackDataDomainNames(msg, off, rdStart+int(rr.Hdr.Rdlength))
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackKEY(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(KEY)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Flags, off, err = unpackUint16(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Protocol, off, err = unpackUint8(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Algorithm, off, err = unpackUint8(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackKX(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(KX)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Preference, off, err = unpackUint16(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Exchanger, off, err = UnpackDomainName(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackL32(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(L32)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Preference, off, err = unpackUint16(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Locator32, off, err = unpackDataA(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackL64(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(L64)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Preference, off, err = unpackUint16(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Locator64, off, err = unpackUint64(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackLOC(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(LOC)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Version, off, err = unpackUint8(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Size, off, err = unpackUint8(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.HorizPre, off, err = unpackUint8(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.VertPre, off, err = unpackUint8(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Latitude, off, err = unpackUint32(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Longitude, off, err = unpackUint32(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Altitude, off, err = unpackUint32(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackLP(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(LP)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Preference, off, err = unpackUint16(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Fqdn, off, err = UnpackDomainName(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackMB(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(MB)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Mb, off, err = UnpackDomainName(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackMD(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(MD)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Md, off, err = UnpackDomainName(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackMF(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(MF)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Mf, off, err = UnpackDomainName(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackMG(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(MG)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Mg, off, err = UnpackDomainName(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackMINFO(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(MINFO)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Rmail, off, err = UnpackDomainName(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Email, off, err = UnpackDomainName(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackMR(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(MR)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Mr, off, err = UnpackDomainName(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackMX(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(MX)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Preference, off, err = unpackUint16(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Mx, off, err = UnpackDomainName(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackNAPTR(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(NAPTR)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Order, off, err = unpackUint16(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Preference, off, err = unpackUint16(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Flags, off, err = unpackString(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Service, off, err = unpackString(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Regexp, off, err = unpackString(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Replacement, off, err = UnpackDomainName(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackNID(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(NID)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Preference, off, err = unpackUint16(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.NodeID, off, err = unpackUint64(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackNIMLOC(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(NIMLOC)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Locator, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackNINFO(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(NINFO)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.ZSData, off, err = unpackStringTxt(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackNS(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(NS)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Ns, off, err = UnpackDomainName(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackNSAPPTR(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(NSAPPTR)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Ptr, off, err = UnpackDomainName(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackNSEC(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(NSEC)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.NextDomain, off, err = UnpackDomainName(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.TypeBitMap, off, err = unpackDataNsec(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackNSEC3(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(NSEC3)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Hash, off, err = unpackUint8(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Flags, off, err = unpackUint8(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Iterations, off, err = unpackUint16(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.SaltLength, off, err = unpackUint8(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Salt, off, err = unpackStringHex(msg, off, off+int(rr.SaltLength))
-	if err != nil {
-		return rr, off, err
-	}
-	rr.HashLength, off, err = unpackUint8(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.NextDomain, off, err = unpackStringBase32(msg, off, off+int(rr.HashLength))
-	if err != nil {
-		return rr, off, err
-	}
-	rr.TypeBitMap, off, err = unpackDataNsec(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackNSEC3PARAM(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(NSEC3PARAM)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Hash, off, err = unpackUint8(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Flags, off, err = unpackUint8(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Iterations, off, err = unpackUint16(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.SaltLength, off, err = unpackUint8(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Salt, off, err = unpackStringHex(msg, off, off+int(rr.SaltLength))
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackOPENPGPKEY(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(OPENPGPKEY)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackOPT(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(OPT)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Option, off, err = unpackDataOpt(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackPTR(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(PTR)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Ptr, off, err = UnpackDomainName(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackPX(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(PX)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Preference, off, err = unpackUint16(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Map822, off, err = UnpackDomainName(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Mapx400, off, err = UnpackDomainName(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackRFC3597(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(RFC3597)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Rdata, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackRKEY(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(RKEY)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Flags, off, err = unpackUint16(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Protocol, off, err = unpackUint8(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Algorithm, off, err = unpackUint8(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackRP(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(RP)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Mbox, off, err = UnpackDomainName(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Txt, off, err = UnpackDomainName(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackRRSIG(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(RRSIG)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.TypeCovered, off, err = unpackUint16(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Algorithm, off, err = unpackUint8(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Labels, off, err = unpackUint8(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.OrigTtl, off, err = unpackUint32(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Expiration, off, err = unpackUint32(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Inception, off, err = unpackUint32(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.KeyTag, off, err = unpackUint16(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.SignerName, off, err = UnpackDomainName(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Signature, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackRT(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(RT)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Preference, off, err = unpackUint16(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Host, off, err = UnpackDomainName(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackSIG(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(SIG)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.TypeCovered, off, err = unpackUint16(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Algorithm, off, err = unpackUint8(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Labels, off, err = unpackUint8(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.OrigTtl, off, err = unpackUint32(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Expiration, off, err = unpackUint32(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Inception, off, err = unpackUint32(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.KeyTag, off, err = unpackUint16(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.SignerName, off, err = UnpackDomainName(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Signature, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackSMIMEA(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(SMIMEA)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Usage, off, err = unpackUint8(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Selector, off, err = unpackUint8(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.MatchingType, off, err = unpackUint8(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Certificate, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackSOA(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(SOA)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Ns, off, err = UnpackDomainName(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Mbox, off, err = UnpackDomainName(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Serial, off, err = unpackUint32(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Refresh, off, err = unpackUint32(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Retry, off, err = unpackUint32(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Expire, off, err = unpackUint32(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Minttl, off, err = unpackUint32(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackSPF(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(SPF)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Txt, off, err = unpackStringTxt(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackSRV(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(SRV)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Priority, off, err = unpackUint16(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Weight, off, err = unpackUint16(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Port, off, err = unpackUint16(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Target, off, err = UnpackDomainName(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackSSHFP(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(SSHFP)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Algorithm, off, err = unpackUint8(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Type, off, err = unpackUint8(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.FingerPrint, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackTA(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(TA)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.KeyTag, off, err = unpackUint16(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Algorithm, off, err = unpackUint8(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.DigestType, off, err = unpackUint8(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackTALINK(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(TALINK)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.PreviousName, off, err = UnpackDomainName(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.NextName, off, err = UnpackDomainName(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackTKEY(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(TKEY)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Algorithm, off, err = UnpackDomainName(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Inception, off, err = unpackUint32(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Expiration, off, err = unpackUint32(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Mode, off, err = unpackUint16(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Error, off, err = unpackUint16(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.KeySize, off, err = unpackUint16(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Key, off, err = unpackString(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.OtherLen, off, err = unpackUint16(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.OtherData, off, err = unpackString(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackTLSA(h RR_Header, msg []byte, off int) (RR, int, error) {
-	rr := new(TLSA)
-	rr.Hdr = h
-	if noRdata(h) {
-		return rr, off, nil
-	}
-	var err error
-	rdStart := off
-	_ = rdStart
-
-	rr.Usage, off, err = unpackUint8(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Selector, off, err = unpackUint8(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.MatchingType, off, err = unpackUint8(msg, off)
-	if err != nil {
-		return rr, off, err
-	}
-	if off == len(msg) {
-		return rr, off, nil
-	}
-	rr.Certificate, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
-	if err != nil {
-		return rr, off, err
-	}
-	return rr, off, err
-}
-
-func unpackTSIG(h RR_Header, msg []byte, off int) (RR, int, error) {
(RR, int, error) { - rr := new(TSIG) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Algorithm, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.TimeSigned, off, err = unpackUint48(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Fudge, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.MACSize, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.MAC, off, err = unpackStringHex(msg, off, off+int(rr.MACSize)) - if err != nil { - return rr, off, err - } - rr.OrigId, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Error, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.OtherLen, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.OtherData, off, err = unpackStringHex(msg, off, off+int(rr.OtherLen)) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackTXT(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(TXT) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Txt, off, err = unpackStringTxt(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackUID(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(UID) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Uid, off, err = unpackUint32(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackUINFO(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(UINFO) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Uinfo, off, err = unpackString(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackURI(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(URI) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Priority, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Weight, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Target, off, err = unpackStringOctet(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackX25(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(X25) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.PSDNAddress, off, err = unpackString(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -var typeToUnpack = map[uint16]func(RR_Header, []byte, int) (RR, int, error){ - TypeA: unpackA, - TypeAAAA: unpackAAAA, - TypeAFSDB: unpackAFSDB, - TypeANY: unpackANY, - TypeCAA: unpackCAA, - TypeCDNSKEY: unpackCDNSKEY, - TypeCDS: unpackCDS, - TypeCERT: unpackCERT, - TypeCNAME: unpackCNAME, - TypeDHCID: unpackDHCID, - 
TypeDLV: unpackDLV, - TypeDNAME: unpackDNAME, - TypeDNSKEY: unpackDNSKEY, - TypeDS: unpackDS, - TypeEID: unpackEID, - TypeEUI48: unpackEUI48, - TypeEUI64: unpackEUI64, - TypeGID: unpackGID, - TypeGPOS: unpackGPOS, - TypeHINFO: unpackHINFO, - TypeHIP: unpackHIP, - TypeKEY: unpackKEY, - TypeKX: unpackKX, - TypeL32: unpackL32, - TypeL64: unpackL64, - TypeLOC: unpackLOC, - TypeLP: unpackLP, - TypeMB: unpackMB, - TypeMD: unpackMD, - TypeMF: unpackMF, - TypeMG: unpackMG, - TypeMINFO: unpackMINFO, - TypeMR: unpackMR, - TypeMX: unpackMX, - TypeNAPTR: unpackNAPTR, - TypeNID: unpackNID, - TypeNIMLOC: unpackNIMLOC, - TypeNINFO: unpackNINFO, - TypeNS: unpackNS, - TypeNSAPPTR: unpackNSAPPTR, - TypeNSEC: unpackNSEC, - TypeNSEC3: unpackNSEC3, - TypeNSEC3PARAM: unpackNSEC3PARAM, - TypeOPENPGPKEY: unpackOPENPGPKEY, - TypeOPT: unpackOPT, - TypePTR: unpackPTR, - TypePX: unpackPX, - TypeRKEY: unpackRKEY, - TypeRP: unpackRP, - TypeRRSIG: unpackRRSIG, - TypeRT: unpackRT, - TypeSIG: unpackSIG, - TypeSMIMEA: unpackSMIMEA, - TypeSOA: unpackSOA, - TypeSPF: unpackSPF, - TypeSRV: unpackSRV, - TypeSSHFP: unpackSSHFP, - TypeTA: unpackTA, - TypeTALINK: unpackTALINK, - TypeTKEY: unpackTKEY, - TypeTLSA: unpackTLSA, - TypeTSIG: unpackTSIG, - TypeTXT: unpackTXT, - TypeUID: unpackUID, - TypeUINFO: unpackUINFO, - TypeURI: unpackURI, - TypeX25: unpackX25, -} diff --git a/vendor/github.com/miekg/dns/ztypes.go b/vendor/github.com/miekg/dns/ztypes.go deleted file mode 100644 index 3c052773..00000000 --- a/vendor/github.com/miekg/dns/ztypes.go +++ /dev/null @@ -1,842 +0,0 @@ -// *** DO NOT MODIFY *** -// AUTOGENERATED BY go generate from type_generate.go - -package dns - -import ( - "encoding/base64" - "net" -) - -// TypeToRR is a map of constructors for each RR type. -var TypeToRR = map[uint16]func() RR{ - TypeA: func() RR { return new(A) }, - TypeAAAA: func() RR { return new(AAAA) }, - TypeAFSDB: func() RR { return new(AFSDB) }, - TypeANY: func() RR { return new(ANY) }, - TypeCAA: func() RR { return new(CAA) }, - TypeCDNSKEY: func() RR { return new(CDNSKEY) }, - TypeCDS: func() RR { return new(CDS) }, - TypeCERT: func() RR { return new(CERT) }, - TypeCNAME: func() RR { return new(CNAME) }, - TypeDHCID: func() RR { return new(DHCID) }, - TypeDLV: func() RR { return new(DLV) }, - TypeDNAME: func() RR { return new(DNAME) }, - TypeDNSKEY: func() RR { return new(DNSKEY) }, - TypeDS: func() RR { return new(DS) }, - TypeEID: func() RR { return new(EID) }, - TypeEUI48: func() RR { return new(EUI48) }, - TypeEUI64: func() RR { return new(EUI64) }, - TypeGID: func() RR { return new(GID) }, - TypeGPOS: func() RR { return new(GPOS) }, - TypeHINFO: func() RR { return new(HINFO) }, - TypeHIP: func() RR { return new(HIP) }, - TypeKEY: func() RR { return new(KEY) }, - TypeKX: func() RR { return new(KX) }, - TypeL32: func() RR { return new(L32) }, - TypeL64: func() RR { return new(L64) }, - TypeLOC: func() RR { return new(LOC) }, - TypeLP: func() RR { return new(LP) }, - TypeMB: func() RR { return new(MB) }, - TypeMD: func() RR { return new(MD) }, - TypeMF: func() RR { return new(MF) }, - TypeMG: func() RR { return new(MG) }, - TypeMINFO: func() RR { return new(MINFO) }, - TypeMR: func() RR { return new(MR) }, - TypeMX: func() RR { return new(MX) }, - TypeNAPTR: func() RR { return new(NAPTR) }, - TypeNID: func() RR { return new(NID) }, - TypeNIMLOC: func() RR { return new(NIMLOC) }, - TypeNINFO: func() RR { return new(NINFO) }, - TypeNS: func() RR { return new(NS) }, - TypeNSAPPTR: func() RR { return new(NSAPPTR) }, - TypeNSEC: func() RR { 
return new(NSEC) }, - TypeNSEC3: func() RR { return new(NSEC3) }, - TypeNSEC3PARAM: func() RR { return new(NSEC3PARAM) }, - TypeOPENPGPKEY: func() RR { return new(OPENPGPKEY) }, - TypeOPT: func() RR { return new(OPT) }, - TypePTR: func() RR { return new(PTR) }, - TypePX: func() RR { return new(PX) }, - TypeRKEY: func() RR { return new(RKEY) }, - TypeRP: func() RR { return new(RP) }, - TypeRRSIG: func() RR { return new(RRSIG) }, - TypeRT: func() RR { return new(RT) }, - TypeSIG: func() RR { return new(SIG) }, - TypeSMIMEA: func() RR { return new(SMIMEA) }, - TypeSOA: func() RR { return new(SOA) }, - TypeSPF: func() RR { return new(SPF) }, - TypeSRV: func() RR { return new(SRV) }, - TypeSSHFP: func() RR { return new(SSHFP) }, - TypeTA: func() RR { return new(TA) }, - TypeTALINK: func() RR { return new(TALINK) }, - TypeTKEY: func() RR { return new(TKEY) }, - TypeTLSA: func() RR { return new(TLSA) }, - TypeTSIG: func() RR { return new(TSIG) }, - TypeTXT: func() RR { return new(TXT) }, - TypeUID: func() RR { return new(UID) }, - TypeUINFO: func() RR { return new(UINFO) }, - TypeURI: func() RR { return new(URI) }, - TypeX25: func() RR { return new(X25) }, -} - -// TypeToString is a map of strings for each RR type. -var TypeToString = map[uint16]string{ - TypeA: "A", - TypeAAAA: "AAAA", - TypeAFSDB: "AFSDB", - TypeANY: "ANY", - TypeATMA: "ATMA", - TypeAXFR: "AXFR", - TypeCAA: "CAA", - TypeCDNSKEY: "CDNSKEY", - TypeCDS: "CDS", - TypeCERT: "CERT", - TypeCNAME: "CNAME", - TypeDHCID: "DHCID", - TypeDLV: "DLV", - TypeDNAME: "DNAME", - TypeDNSKEY: "DNSKEY", - TypeDS: "DS", - TypeEID: "EID", - TypeEUI48: "EUI48", - TypeEUI64: "EUI64", - TypeGID: "GID", - TypeGPOS: "GPOS", - TypeHINFO: "HINFO", - TypeHIP: "HIP", - TypeISDN: "ISDN", - TypeIXFR: "IXFR", - TypeKEY: "KEY", - TypeKX: "KX", - TypeL32: "L32", - TypeL64: "L64", - TypeLOC: "LOC", - TypeLP: "LP", - TypeMAILA: "MAILA", - TypeMAILB: "MAILB", - TypeMB: "MB", - TypeMD: "MD", - TypeMF: "MF", - TypeMG: "MG", - TypeMINFO: "MINFO", - TypeMR: "MR", - TypeMX: "MX", - TypeNAPTR: "NAPTR", - TypeNID: "NID", - TypeNIMLOC: "NIMLOC", - TypeNINFO: "NINFO", - TypeNS: "NS", - TypeNSEC: "NSEC", - TypeNSEC3: "NSEC3", - TypeNSEC3PARAM: "NSEC3PARAM", - TypeNULL: "NULL", - TypeNXT: "NXT", - TypeNone: "None", - TypeOPENPGPKEY: "OPENPGPKEY", - TypeOPT: "OPT", - TypePTR: "PTR", - TypePX: "PX", - TypeRKEY: "RKEY", - TypeRP: "RP", - TypeRRSIG: "RRSIG", - TypeRT: "RT", - TypeReserved: "Reserved", - TypeSIG: "SIG", - TypeSMIMEA: "SMIMEA", - TypeSOA: "SOA", - TypeSPF: "SPF", - TypeSRV: "SRV", - TypeSSHFP: "SSHFP", - TypeTA: "TA", - TypeTALINK: "TALINK", - TypeTKEY: "TKEY", - TypeTLSA: "TLSA", - TypeTSIG: "TSIG", - TypeTXT: "TXT", - TypeUID: "UID", - TypeUINFO: "UINFO", - TypeUNSPEC: "UNSPEC", - TypeURI: "URI", - TypeX25: "X25", - TypeNSAPPTR: "NSAP-PTR", -} - -// Header() functions -func (rr *A) Header() *RR_Header { return &rr.Hdr } -func (rr *AAAA) Header() *RR_Header { return &rr.Hdr } -func (rr *AFSDB) Header() *RR_Header { return &rr.Hdr } -func (rr *ANY) Header() *RR_Header { return &rr.Hdr } -func (rr *CAA) Header() *RR_Header { return &rr.Hdr } -func (rr *CDNSKEY) Header() *RR_Header { return &rr.Hdr } -func (rr *CDS) Header() *RR_Header { return &rr.Hdr } -func (rr *CERT) Header() *RR_Header { return &rr.Hdr } -func (rr *CNAME) Header() *RR_Header { return &rr.Hdr } -func (rr *DHCID) Header() *RR_Header { return &rr.Hdr } -func (rr *DLV) Header() *RR_Header { return &rr.Hdr } -func (rr *DNAME) Header() *RR_Header { return &rr.Hdr } -func (rr *DNSKEY) Header() *RR_Header 
{ return &rr.Hdr } -func (rr *DS) Header() *RR_Header { return &rr.Hdr } -func (rr *EID) Header() *RR_Header { return &rr.Hdr } -func (rr *EUI48) Header() *RR_Header { return &rr.Hdr } -func (rr *EUI64) Header() *RR_Header { return &rr.Hdr } -func (rr *GID) Header() *RR_Header { return &rr.Hdr } -func (rr *GPOS) Header() *RR_Header { return &rr.Hdr } -func (rr *HINFO) Header() *RR_Header { return &rr.Hdr } -func (rr *HIP) Header() *RR_Header { return &rr.Hdr } -func (rr *KEY) Header() *RR_Header { return &rr.Hdr } -func (rr *KX) Header() *RR_Header { return &rr.Hdr } -func (rr *L32) Header() *RR_Header { return &rr.Hdr } -func (rr *L64) Header() *RR_Header { return &rr.Hdr } -func (rr *LOC) Header() *RR_Header { return &rr.Hdr } -func (rr *LP) Header() *RR_Header { return &rr.Hdr } -func (rr *MB) Header() *RR_Header { return &rr.Hdr } -func (rr *MD) Header() *RR_Header { return &rr.Hdr } -func (rr *MF) Header() *RR_Header { return &rr.Hdr } -func (rr *MG) Header() *RR_Header { return &rr.Hdr } -func (rr *MINFO) Header() *RR_Header { return &rr.Hdr } -func (rr *MR) Header() *RR_Header { return &rr.Hdr } -func (rr *MX) Header() *RR_Header { return &rr.Hdr } -func (rr *NAPTR) Header() *RR_Header { return &rr.Hdr } -func (rr *NID) Header() *RR_Header { return &rr.Hdr } -func (rr *NIMLOC) Header() *RR_Header { return &rr.Hdr } -func (rr *NINFO) Header() *RR_Header { return &rr.Hdr } -func (rr *NS) Header() *RR_Header { return &rr.Hdr } -func (rr *NSAPPTR) Header() *RR_Header { return &rr.Hdr } -func (rr *NSEC) Header() *RR_Header { return &rr.Hdr } -func (rr *NSEC3) Header() *RR_Header { return &rr.Hdr } -func (rr *NSEC3PARAM) Header() *RR_Header { return &rr.Hdr } -func (rr *OPENPGPKEY) Header() *RR_Header { return &rr.Hdr } -func (rr *OPT) Header() *RR_Header { return &rr.Hdr } -func (rr *PTR) Header() *RR_Header { return &rr.Hdr } -func (rr *PX) Header() *RR_Header { return &rr.Hdr } -func (rr *RFC3597) Header() *RR_Header { return &rr.Hdr } -func (rr *RKEY) Header() *RR_Header { return &rr.Hdr } -func (rr *RP) Header() *RR_Header { return &rr.Hdr } -func (rr *RRSIG) Header() *RR_Header { return &rr.Hdr } -func (rr *RT) Header() *RR_Header { return &rr.Hdr } -func (rr *SIG) Header() *RR_Header { return &rr.Hdr } -func (rr *SMIMEA) Header() *RR_Header { return &rr.Hdr } -func (rr *SOA) Header() *RR_Header { return &rr.Hdr } -func (rr *SPF) Header() *RR_Header { return &rr.Hdr } -func (rr *SRV) Header() *RR_Header { return &rr.Hdr } -func (rr *SSHFP) Header() *RR_Header { return &rr.Hdr } -func (rr *TA) Header() *RR_Header { return &rr.Hdr } -func (rr *TALINK) Header() *RR_Header { return &rr.Hdr } -func (rr *TKEY) Header() *RR_Header { return &rr.Hdr } -func (rr *TLSA) Header() *RR_Header { return &rr.Hdr } -func (rr *TSIG) Header() *RR_Header { return &rr.Hdr } -func (rr *TXT) Header() *RR_Header { return &rr.Hdr } -func (rr *UID) Header() *RR_Header { return &rr.Hdr } -func (rr *UINFO) Header() *RR_Header { return &rr.Hdr } -func (rr *URI) Header() *RR_Header { return &rr.Hdr } -func (rr *X25) Header() *RR_Header { return &rr.Hdr } - -// len() functions -func (rr *A) len() int { - l := rr.Hdr.len() - l += net.IPv4len // A - return l -} -func (rr *AAAA) len() int { - l := rr.Hdr.len() - l += net.IPv6len // AAAA - return l -} -func (rr *AFSDB) len() int { - l := rr.Hdr.len() - l += 2 // Subtype - l += len(rr.Hostname) + 1 - return l -} -func (rr *ANY) len() int { - l := rr.Hdr.len() - return l -} -func (rr *CAA) len() int { - l := rr.Hdr.len() - l += 1 // Flag - l += len(rr.Tag) + 1 - l += 
len(rr.Value) - return l -} -func (rr *CERT) len() int { - l := rr.Hdr.len() - l += 2 // Type - l += 2 // KeyTag - l += 1 // Algorithm - l += base64.StdEncoding.DecodedLen(len(rr.Certificate)) - return l -} -func (rr *CNAME) len() int { - l := rr.Hdr.len() - l += len(rr.Target) + 1 - return l -} -func (rr *DHCID) len() int { - l := rr.Hdr.len() - l += base64.StdEncoding.DecodedLen(len(rr.Digest)) - return l -} -func (rr *DNAME) len() int { - l := rr.Hdr.len() - l += len(rr.Target) + 1 - return l -} -func (rr *DNSKEY) len() int { - l := rr.Hdr.len() - l += 2 // Flags - l += 1 // Protocol - l += 1 // Algorithm - l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) - return l -} -func (rr *DS) len() int { - l := rr.Hdr.len() - l += 2 // KeyTag - l += 1 // Algorithm - l += 1 // DigestType - l += len(rr.Digest)/2 + 1 - return l -} -func (rr *EID) len() int { - l := rr.Hdr.len() - l += len(rr.Endpoint)/2 + 1 - return l -} -func (rr *EUI48) len() int { - l := rr.Hdr.len() - l += 6 // Address - return l -} -func (rr *EUI64) len() int { - l := rr.Hdr.len() - l += 8 // Address - return l -} -func (rr *GID) len() int { - l := rr.Hdr.len() - l += 4 // Gid - return l -} -func (rr *GPOS) len() int { - l := rr.Hdr.len() - l += len(rr.Longitude) + 1 - l += len(rr.Latitude) + 1 - l += len(rr.Altitude) + 1 - return l -} -func (rr *HINFO) len() int { - l := rr.Hdr.len() - l += len(rr.Cpu) + 1 - l += len(rr.Os) + 1 - return l -} -func (rr *HIP) len() int { - l := rr.Hdr.len() - l += 1 // HitLength - l += 1 // PublicKeyAlgorithm - l += 2 // PublicKeyLength - l += len(rr.Hit)/2 + 1 - l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) - for _, x := range rr.RendezvousServers { - l += len(x) + 1 - } - return l -} -func (rr *KX) len() int { - l := rr.Hdr.len() - l += 2 // Preference - l += len(rr.Exchanger) + 1 - return l -} -func (rr *L32) len() int { - l := rr.Hdr.len() - l += 2 // Preference - l += net.IPv4len // Locator32 - return l -} -func (rr *L64) len() int { - l := rr.Hdr.len() - l += 2 // Preference - l += 8 // Locator64 - return l -} -func (rr *LOC) len() int { - l := rr.Hdr.len() - l += 1 // Version - l += 1 // Size - l += 1 // HorizPre - l += 1 // VertPre - l += 4 // Latitude - l += 4 // Longitude - l += 4 // Altitude - return l -} -func (rr *LP) len() int { - l := rr.Hdr.len() - l += 2 // Preference - l += len(rr.Fqdn) + 1 - return l -} -func (rr *MB) len() int { - l := rr.Hdr.len() - l += len(rr.Mb) + 1 - return l -} -func (rr *MD) len() int { - l := rr.Hdr.len() - l += len(rr.Md) + 1 - return l -} -func (rr *MF) len() int { - l := rr.Hdr.len() - l += len(rr.Mf) + 1 - return l -} -func (rr *MG) len() int { - l := rr.Hdr.len() - l += len(rr.Mg) + 1 - return l -} -func (rr *MINFO) len() int { - l := rr.Hdr.len() - l += len(rr.Rmail) + 1 - l += len(rr.Email) + 1 - return l -} -func (rr *MR) len() int { - l := rr.Hdr.len() - l += len(rr.Mr) + 1 - return l -} -func (rr *MX) len() int { - l := rr.Hdr.len() - l += 2 // Preference - l += len(rr.Mx) + 1 - return l -} -func (rr *NAPTR) len() int { - l := rr.Hdr.len() - l += 2 // Order - l += 2 // Preference - l += len(rr.Flags) + 1 - l += len(rr.Service) + 1 - l += len(rr.Regexp) + 1 - l += len(rr.Replacement) + 1 - return l -} -func (rr *NID) len() int { - l := rr.Hdr.len() - l += 2 // Preference - l += 8 // NodeID - return l -} -func (rr *NIMLOC) len() int { - l := rr.Hdr.len() - l += len(rr.Locator)/2 + 1 - return l -} -func (rr *NINFO) len() int { - l := rr.Hdr.len() - for _, x := range rr.ZSData { - l += len(x) + 1 - } - return l -} -func (rr *NS) 
len() int { - l := rr.Hdr.len() - l += len(rr.Ns) + 1 - return l -} -func (rr *NSAPPTR) len() int { - l := rr.Hdr.len() - l += len(rr.Ptr) + 1 - return l -} -func (rr *NSEC3PARAM) len() int { - l := rr.Hdr.len() - l += 1 // Hash - l += 1 // Flags - l += 2 // Iterations - l += 1 // SaltLength - l += len(rr.Salt)/2 + 1 - return l -} -func (rr *OPENPGPKEY) len() int { - l := rr.Hdr.len() - l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) - return l -} -func (rr *PTR) len() int { - l := rr.Hdr.len() - l += len(rr.Ptr) + 1 - return l -} -func (rr *PX) len() int { - l := rr.Hdr.len() - l += 2 // Preference - l += len(rr.Map822) + 1 - l += len(rr.Mapx400) + 1 - return l -} -func (rr *RFC3597) len() int { - l := rr.Hdr.len() - l += len(rr.Rdata)/2 + 1 - return l -} -func (rr *RKEY) len() int { - l := rr.Hdr.len() - l += 2 // Flags - l += 1 // Protocol - l += 1 // Algorithm - l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) - return l -} -func (rr *RP) len() int { - l := rr.Hdr.len() - l += len(rr.Mbox) + 1 - l += len(rr.Txt) + 1 - return l -} -func (rr *RRSIG) len() int { - l := rr.Hdr.len() - l += 2 // TypeCovered - l += 1 // Algorithm - l += 1 // Labels - l += 4 // OrigTtl - l += 4 // Expiration - l += 4 // Inception - l += 2 // KeyTag - l += len(rr.SignerName) + 1 - l += base64.StdEncoding.DecodedLen(len(rr.Signature)) - return l -} -func (rr *RT) len() int { - l := rr.Hdr.len() - l += 2 // Preference - l += len(rr.Host) + 1 - return l -} -func (rr *SMIMEA) len() int { - l := rr.Hdr.len() - l += 1 // Usage - l += 1 // Selector - l += 1 // MatchingType - l += len(rr.Certificate)/2 + 1 - return l -} -func (rr *SOA) len() int { - l := rr.Hdr.len() - l += len(rr.Ns) + 1 - l += len(rr.Mbox) + 1 - l += 4 // Serial - l += 4 // Refresh - l += 4 // Retry - l += 4 // Expire - l += 4 // Minttl - return l -} -func (rr *SPF) len() int { - l := rr.Hdr.len() - for _, x := range rr.Txt { - l += len(x) + 1 - } - return l -} -func (rr *SRV) len() int { - l := rr.Hdr.len() - l += 2 // Priority - l += 2 // Weight - l += 2 // Port - l += len(rr.Target) + 1 - return l -} -func (rr *SSHFP) len() int { - l := rr.Hdr.len() - l += 1 // Algorithm - l += 1 // Type - l += len(rr.FingerPrint)/2 + 1 - return l -} -func (rr *TA) len() int { - l := rr.Hdr.len() - l += 2 // KeyTag - l += 1 // Algorithm - l += 1 // DigestType - l += len(rr.Digest)/2 + 1 - return l -} -func (rr *TALINK) len() int { - l := rr.Hdr.len() - l += len(rr.PreviousName) + 1 - l += len(rr.NextName) + 1 - return l -} -func (rr *TKEY) len() int { - l := rr.Hdr.len() - l += len(rr.Algorithm) + 1 - l += 4 // Inception - l += 4 // Expiration - l += 2 // Mode - l += 2 // Error - l += 2 // KeySize - l += len(rr.Key) + 1 - l += 2 // OtherLen - l += len(rr.OtherData) + 1 - return l -} -func (rr *TLSA) len() int { - l := rr.Hdr.len() - l += 1 // Usage - l += 1 // Selector - l += 1 // MatchingType - l += len(rr.Certificate)/2 + 1 - return l -} -func (rr *TSIG) len() int { - l := rr.Hdr.len() - l += len(rr.Algorithm) + 1 - l += 6 // TimeSigned - l += 2 // Fudge - l += 2 // MACSize - l += len(rr.MAC)/2 + 1 - l += 2 // OrigId - l += 2 // Error - l += 2 // OtherLen - l += len(rr.OtherData)/2 + 1 - return l -} -func (rr *TXT) len() int { - l := rr.Hdr.len() - for _, x := range rr.Txt { - l += len(x) + 1 - } - return l -} -func (rr *UID) len() int { - l := rr.Hdr.len() - l += 4 // Uid - return l -} -func (rr *UINFO) len() int { - l := rr.Hdr.len() - l += len(rr.Uinfo) + 1 - return l -} -func (rr *URI) len() int { - l := rr.Hdr.len() - l += 2 // Priority - l += 2 
// Weight - l += len(rr.Target) - return l -} -func (rr *X25) len() int { - l := rr.Hdr.len() - l += len(rr.PSDNAddress) + 1 - return l -} - -// copy() functions -func (rr *A) copy() RR { - return &A{*rr.Hdr.copyHeader(), copyIP(rr.A)} -} -func (rr *AAAA) copy() RR { - return &AAAA{*rr.Hdr.copyHeader(), copyIP(rr.AAAA)} -} -func (rr *AFSDB) copy() RR { - return &AFSDB{*rr.Hdr.copyHeader(), rr.Subtype, rr.Hostname} -} -func (rr *ANY) copy() RR { - return &ANY{*rr.Hdr.copyHeader()} -} -func (rr *CAA) copy() RR { - return &CAA{*rr.Hdr.copyHeader(), rr.Flag, rr.Tag, rr.Value} -} -func (rr *CERT) copy() RR { - return &CERT{*rr.Hdr.copyHeader(), rr.Type, rr.KeyTag, rr.Algorithm, rr.Certificate} -} -func (rr *CNAME) copy() RR { - return &CNAME{*rr.Hdr.copyHeader(), rr.Target} -} -func (rr *DHCID) copy() RR { - return &DHCID{*rr.Hdr.copyHeader(), rr.Digest} -} -func (rr *DNAME) copy() RR { - return &DNAME{*rr.Hdr.copyHeader(), rr.Target} -} -func (rr *DNSKEY) copy() RR { - return &DNSKEY{*rr.Hdr.copyHeader(), rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey} -} -func (rr *DS) copy() RR { - return &DS{*rr.Hdr.copyHeader(), rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} -} -func (rr *EID) copy() RR { - return &EID{*rr.Hdr.copyHeader(), rr.Endpoint} -} -func (rr *EUI48) copy() RR { - return &EUI48{*rr.Hdr.copyHeader(), rr.Address} -} -func (rr *EUI64) copy() RR { - return &EUI64{*rr.Hdr.copyHeader(), rr.Address} -} -func (rr *GID) copy() RR { - return &GID{*rr.Hdr.copyHeader(), rr.Gid} -} -func (rr *GPOS) copy() RR { - return &GPOS{*rr.Hdr.copyHeader(), rr.Longitude, rr.Latitude, rr.Altitude} -} -func (rr *HINFO) copy() RR { - return &HINFO{*rr.Hdr.copyHeader(), rr.Cpu, rr.Os} -} -func (rr *HIP) copy() RR { - RendezvousServers := make([]string, len(rr.RendezvousServers)) - copy(RendezvousServers, rr.RendezvousServers) - return &HIP{*rr.Hdr.copyHeader(), rr.HitLength, rr.PublicKeyAlgorithm, rr.PublicKeyLength, rr.Hit, rr.PublicKey, RendezvousServers} -} -func (rr *KX) copy() RR { - return &KX{*rr.Hdr.copyHeader(), rr.Preference, rr.Exchanger} -} -func (rr *L32) copy() RR { - return &L32{*rr.Hdr.copyHeader(), rr.Preference, copyIP(rr.Locator32)} -} -func (rr *L64) copy() RR { - return &L64{*rr.Hdr.copyHeader(), rr.Preference, rr.Locator64} -} -func (rr *LOC) copy() RR { - return &LOC{*rr.Hdr.copyHeader(), rr.Version, rr.Size, rr.HorizPre, rr.VertPre, rr.Latitude, rr.Longitude, rr.Altitude} -} -func (rr *LP) copy() RR { - return &LP{*rr.Hdr.copyHeader(), rr.Preference, rr.Fqdn} -} -func (rr *MB) copy() RR { - return &MB{*rr.Hdr.copyHeader(), rr.Mb} -} -func (rr *MD) copy() RR { - return &MD{*rr.Hdr.copyHeader(), rr.Md} -} -func (rr *MF) copy() RR { - return &MF{*rr.Hdr.copyHeader(), rr.Mf} -} -func (rr *MG) copy() RR { - return &MG{*rr.Hdr.copyHeader(), rr.Mg} -} -func (rr *MINFO) copy() RR { - return &MINFO{*rr.Hdr.copyHeader(), rr.Rmail, rr.Email} -} -func (rr *MR) copy() RR { - return &MR{*rr.Hdr.copyHeader(), rr.Mr} -} -func (rr *MX) copy() RR { - return &MX{*rr.Hdr.copyHeader(), rr.Preference, rr.Mx} -} -func (rr *NAPTR) copy() RR { - return &NAPTR{*rr.Hdr.copyHeader(), rr.Order, rr.Preference, rr.Flags, rr.Service, rr.Regexp, rr.Replacement} -} -func (rr *NID) copy() RR { - return &NID{*rr.Hdr.copyHeader(), rr.Preference, rr.NodeID} -} -func (rr *NIMLOC) copy() RR { - return &NIMLOC{*rr.Hdr.copyHeader(), rr.Locator} -} -func (rr *NINFO) copy() RR { - ZSData := make([]string, len(rr.ZSData)) - copy(ZSData, rr.ZSData) - return &NINFO{*rr.Hdr.copyHeader(), ZSData} -} -func (rr *NS) copy() RR { - 
return &NS{*rr.Hdr.copyHeader(), rr.Ns} -} -func (rr *NSAPPTR) copy() RR { - return &NSAPPTR{*rr.Hdr.copyHeader(), rr.Ptr} -} -func (rr *NSEC) copy() RR { - TypeBitMap := make([]uint16, len(rr.TypeBitMap)) - copy(TypeBitMap, rr.TypeBitMap) - return &NSEC{*rr.Hdr.copyHeader(), rr.NextDomain, TypeBitMap} -} -func (rr *NSEC3) copy() RR { - TypeBitMap := make([]uint16, len(rr.TypeBitMap)) - copy(TypeBitMap, rr.TypeBitMap) - return &NSEC3{*rr.Hdr.copyHeader(), rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt, rr.HashLength, rr.NextDomain, TypeBitMap} -} -func (rr *NSEC3PARAM) copy() RR { - return &NSEC3PARAM{*rr.Hdr.copyHeader(), rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt} -} -func (rr *OPENPGPKEY) copy() RR { - return &OPENPGPKEY{*rr.Hdr.copyHeader(), rr.PublicKey} -} -func (rr *OPT) copy() RR { - Option := make([]EDNS0, len(rr.Option)) - copy(Option, rr.Option) - return &OPT{*rr.Hdr.copyHeader(), Option} -} -func (rr *PTR) copy() RR { - return &PTR{*rr.Hdr.copyHeader(), rr.Ptr} -} -func (rr *PX) copy() RR { - return &PX{*rr.Hdr.copyHeader(), rr.Preference, rr.Map822, rr.Mapx400} -} -func (rr *RFC3597) copy() RR { - return &RFC3597{*rr.Hdr.copyHeader(), rr.Rdata} -} -func (rr *RKEY) copy() RR { - return &RKEY{*rr.Hdr.copyHeader(), rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey} -} -func (rr *RP) copy() RR { - return &RP{*rr.Hdr.copyHeader(), rr.Mbox, rr.Txt} -} -func (rr *RRSIG) copy() RR { - return &RRSIG{*rr.Hdr.copyHeader(), rr.TypeCovered, rr.Algorithm, rr.Labels, rr.OrigTtl, rr.Expiration, rr.Inception, rr.KeyTag, rr.SignerName, rr.Signature} -} -func (rr *RT) copy() RR { - return &RT{*rr.Hdr.copyHeader(), rr.Preference, rr.Host} -} -func (rr *SMIMEA) copy() RR { - return &SMIMEA{*rr.Hdr.copyHeader(), rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate} -} -func (rr *SOA) copy() RR { - return &SOA{*rr.Hdr.copyHeader(), rr.Ns, rr.Mbox, rr.Serial, rr.Refresh, rr.Retry, rr.Expire, rr.Minttl} -} -func (rr *SPF) copy() RR { - Txt := make([]string, len(rr.Txt)) - copy(Txt, rr.Txt) - return &SPF{*rr.Hdr.copyHeader(), Txt} -} -func (rr *SRV) copy() RR { - return &SRV{*rr.Hdr.copyHeader(), rr.Priority, rr.Weight, rr.Port, rr.Target} -} -func (rr *SSHFP) copy() RR { - return &SSHFP{*rr.Hdr.copyHeader(), rr.Algorithm, rr.Type, rr.FingerPrint} -} -func (rr *TA) copy() RR { - return &TA{*rr.Hdr.copyHeader(), rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} -} -func (rr *TALINK) copy() RR { - return &TALINK{*rr.Hdr.copyHeader(), rr.PreviousName, rr.NextName} -} -func (rr *TKEY) copy() RR { - return &TKEY{*rr.Hdr.copyHeader(), rr.Algorithm, rr.Inception, rr.Expiration, rr.Mode, rr.Error, rr.KeySize, rr.Key, rr.OtherLen, rr.OtherData} -} -func (rr *TLSA) copy() RR { - return &TLSA{*rr.Hdr.copyHeader(), rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate} -} -func (rr *TSIG) copy() RR { - return &TSIG{*rr.Hdr.copyHeader(), rr.Algorithm, rr.TimeSigned, rr.Fudge, rr.MACSize, rr.MAC, rr.OrigId, rr.Error, rr.OtherLen, rr.OtherData} -} -func (rr *TXT) copy() RR { - Txt := make([]string, len(rr.Txt)) - copy(Txt, rr.Txt) - return &TXT{*rr.Hdr.copyHeader(), Txt} -} -func (rr *UID) copy() RR { - return &UID{*rr.Hdr.copyHeader(), rr.Uid} -} -func (rr *UINFO) copy() RR { - return &UINFO{*rr.Hdr.copyHeader(), rr.Uinfo} -} -func (rr *URI) copy() RR { - return &URI{*rr.Hdr.copyHeader(), rr.Priority, rr.Weight, rr.Target} -} -func (rr *X25) copy() RR { - return &X25{*rr.Hdr.copyHeader(), rr.PSDNAddress} -} diff --git a/vendor/github.com/xenolf/lego/LICENSE 
b/vendor/github.com/xenolf/lego/LICENSE deleted file mode 100644 index 17460b71..00000000 --- a/vendor/github.com/xenolf/lego/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Sebastian Erhart - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/xenolf/lego/README.md b/vendor/github.com/xenolf/lego/README.md deleted file mode 100644 index ff692132..00000000 --- a/vendor/github.com/xenolf/lego/README.md +++ /dev/null @@ -1,248 +0,0 @@ -# lego -Let's Encrypt client and ACME library written in Go - -[![GoDoc](https://godoc.org/github.com/xenolf/lego/acme?status.svg)](https://godoc.org/github.com/xenolf/lego/acme) -[![Build Status](https://travis-ci.org/xenolf/lego.svg?branch=master)](https://travis-ci.org/xenolf/lego) -[![Dev Chat](https://img.shields.io/badge/dev%20chat-gitter-blue.svg?label=dev+chat)](https://gitter.im/xenolf/lego) - -#### General -This is a work in progress. Please do *NOT* run this on a production server and please report any bugs you find! - -#### Installation -lego supports both binary installs and install from source. - -To get the binary just download the latest release for your OS/Arch from [the release page](https://github.com/xenolf/lego/releases) -and put the binary somewhere convenient. lego does not assume anything about the location you run it from. - -To install from source, just run -``` -go get -u github.com/xenolf/lego -``` - -To build lego inside a Docker container, just run -``` -docker build -t lego . -``` - -#### Features - -- Register with CA -- Obtain certificates -- Renew certificates -- Revoke certificates -- Robust implementation of all ACME challenges - - HTTP (http-01) - - TLS with Server Name Indication (tls-sni-01) - - DNS (dns-01) -- SAN certificate support -- Comes with multiple optional [DNS providers](https://github.com/xenolf/lego/tree/master/providers/dns) -- [Custom challenge solvers](https://github.com/xenolf/lego/wiki/Writing-a-Challenge-Solver) -- Certificate bundling -- OCSP helper function - -Please keep in mind that CLI switches and APIs are still subject to change. - -When using the standard `--path` option, all certificates and account configurations are saved to a folder *.lego* in the current working directory. - -#### Sudo -The CLI does not require root permissions but needs to bind to port 80 and 443 for certain challenges. 
-To run the CLI without sudo, you have four options: - -- Use setcap 'cap_net_bind_service=+ep' /path/to/program -- Pass the `--http` or/and the `--tls` option and specify a custom port to bind to. In this case you have to forward port 80/443 to these custom ports (see [Port Usage](#port-usage)). -- Pass the `--webroot` option and specify the path to your webroot folder. In this case the challenge will be written in a file in `.well-known/acme-challenge/` inside your webroot. -- Pass the `--dns` option and specify a DNS provider. - -#### Port Usage -By default lego assumes it is able to bind to ports 80 and 443 to solve challenges. -If this is not possible in your environment, you can use the `--http` and `--tls` options to instruct -lego to listen on that interface:port for any incoming challenges. - -If you are using this option, make sure you proxy all of the following traffic to these ports. - -HTTP Port: -- All plaintext HTTP requests to port 80 which begin with a request path of `/.well-known/acme-challenge/` for the HTTP challenge. - -TLS Port: -- All TLS handshakes on port 443 for the TLS-SNI challenge. - -This traffic redirection is only needed as long as lego solves challenges. As soon as you have received your certificates you can deactivate the forwarding. - -#### Usage - -``` -NAME: - lego - Let's Encrypt client written in Go - -USAGE: - lego [global options] command [command options] [arguments...] - -VERSION: - 0.3.0 - -COMMANDS: - run Register an account, then create and install a certificate - revoke Revoke a certificate - renew Renew a certificate - dnshelp Shows additional help for the --dns global option - help, h Shows a list of commands or help for one command - -GLOBAL OPTIONS: - --domains, -d [--domains option --domains option] Add domains to the process - --server, -s "https://acme-v01.api.letsencrypt.org/directory" CA hostname (and optionally :port). The server certificate must be trusted in order to avoid further modifications to the client. - --email, -m Email used for registration and recovery contact. - --accept-tos, -a By setting this flag to true you indicate that you accept the current Let's Encrypt terms of service. - --key-type, -k "rsa2048" Key type to use for private keys. Supported: rsa2048, rsa4096, rsa8192, ec256, ec384 - --path "${CWD}/.lego" Directory to use for storing the data - --exclude, -x [--exclude option --exclude option] Explicitly disallow solvers by name from being used. Solvers: "http-01", "tls-sni-01". - --webroot Set the webroot folder to use for HTTP based challenges to write directly in a file in .well-known/acme-challenge - --http Set the port and interface to use for HTTP based challenges to listen on. Supported: interface:port or :port - --tls Set the port and interface to use for TLS based challenges to listen on. Supported: interface:port or :port - --dns Solve a DNS challenge using the specified provider. Disables all other challenges. Run 'lego dnshelp' for help on usage. - --help, -h show help - --version, -v print the version -``` - -##### CLI Example - -Assumes the `lego` binary has permission to bind to ports 80 and 443. You can get a pre-built binary from the [releases](https://github.com/xenolf/lego/releases) page. -If your environment does not allow you to bind to these ports, please read [Port Usage](#port-usage). - -Obtain a certificate: - -```bash -$ lego --email="foo@bar.com" --domains="example.com" run -``` - -(Find your certificate in the `.lego` folder of current working directory.) 
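
The Port Usage and CLI examples above have a direct programmatic counterpart in the vendored `acme` package removed later in this patch. A minimal sketch, assuming the `MyUser` type from the ACME Library Usage example further down and reusing the same alternate ports (5002/5001) as that example:

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"log"

	"github.com/xenolf/lego/acme"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}
	// MyUser implements acme.User; see the ACME Library Usage example below.
	user := MyUser{Email: "you@yours.com", key: key}

	// Staging endpoint, so no real certificates are issued while testing.
	client, err := acme.NewClient("https://acme-staging.api.letsencrypt.org/directory", &user, acme.RSA2048)
	if err != nil {
		log.Fatal(err)
	}

	// Equivalent of the CLI's --http/--tls flags: listen on unprivileged
	// ports and proxy public 80/443 traffic here while challenges run.
	client.SetHTTPAddress(":5002")
	client.SetTLSAddress(":5001")
}
```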
- -To renew the certificate: - -```bash -$ lego --email="foo@bar.com" --domains="example.com" renew -``` - -Obtain a certificate using the DNS challenge and AWS Route 53: - -```bash -$ AWS_REGION=us-east-1 AWS_ACCESS_KEY_ID=my_id AWS_SECRET_ACCESS_KEY=my_key lego --email="foo@bar.com" --domains="example.com" --dns="route53" run -``` - -Note that `--dns=foo` implies `--exclude=http-01` and `--exclude=tls-sni-01`. lego will not attempt other challenges if you've told it to use DNS instead. - -lego defaults to communicating with the production Let's Encrypt ACME server. If you'd like to test something without issuing real certificates, consider using the staging endpoint instead: - -```bash -$ lego --server=https://acme-staging.api.letsencrypt.org/directory … -``` - -#### DNS Challenge API Details - -##### AWS Route 53 - -The following AWS IAM policy document describes the permissions required for lego to complete the DNS challenge. -Replace `` with the Route 53 zone ID of the domain you are authorizing. - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "route53:GetChange", - "route53:ListHostedZonesByName" - ], - "Resource": [ - "*" - ] - }, - { - "Effect": "Allow", - "Action": [ - "route53:ChangeResourceRecordSets" - ], - "Resource": [ - "arn:aws:route53:::hostedzone/" - ] - } - ] -} -``` - -#### ACME Library Usage - -A valid, but bare-bones example use of the acme package: - -```go -// You'll need a user or account type that implements acme.User -type MyUser struct { - Email string - Registration *acme.RegistrationResource - key crypto.PrivateKey -} -func (u MyUser) GetEmail() string { - return u.Email -} -func (u MyUser) GetRegistration() *acme.RegistrationResource { - return u.Registration -} -func (u MyUser) GetPrivateKey() crypto.PrivateKey { - return u.key -} - -// Create a user. New accounts need an email and private key to start. -const rsaKeySize = 2048 -privateKey, err := rsa.GenerateKey(rand.Reader, rsaKeySize) -if err != nil { - log.Fatal(err) -} -myUser := MyUser{ - Email: "you@yours.com", - key: privateKey, -} - -// A client facilitates communication with the CA server. This CA URL is -// configured for a local dev instance of Boulder running in Docker in a VM. -client, err := acme.NewClient("http://192.168.99.100:4000", &myUser, acme.RSA2048) -if err != nil { - log.Fatal(err) -} - -// We specify an http port of 5002 and an tls port of 5001 on all interfaces -// because we aren't running as root and can't bind a listener to port 80 and 443 -// (used later when we attempt to pass challenges). Keep in mind that we still -// need to proxy challenge traffic to port 5002 and 5001. -client.SetHTTPAddress(":5002") -client.SetTLSAddress(":5001") - -// New users will need to register -reg, err := client.Register() -if err != nil { - log.Fatal(err) -} -myUser.Registration = reg - -// SAVE THE USER. - -// The client has a URL to the current Let's Encrypt Subscriber -// Agreement. The user will need to agree to it. -err = client.AgreeToTOS() -if err != nil { - log.Fatal(err) -} - -// The acme library takes care of completing the challenges to obtain the certificate(s). -// The domains must resolve to this machine or you have to use the DNS challenge. -bundle := false -certificates, failures := client.ObtainCertificate([]string{"mydomain.com"}, bundle, nil) -if len(failures) > 0 { - log.Fatal(failures) -} - -// Each certificate comes back with the cert bytes, the bytes of the client's -// private key, and a certificate URL. 
SAVE THESE TO DISK.
-fmt.Printf("%#v\n", certificates)
-
-// ... all done.
-```
diff --git a/vendor/github.com/xenolf/lego/acme/challenges.go b/vendor/github.com/xenolf/lego/acme/challenges.go
deleted file mode 100644
index 85790050..00000000
--- a/vendor/github.com/xenolf/lego/acme/challenges.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package acme
-
-// Challenge is a string that identifies a particular type and version of ACME challenge.
-type Challenge string
-
-const (
-	// HTTP01 is the "http-01" ACME challenge https://github.com/ietf-wg-acme/acme/blob/master/draft-ietf-acme-acme.md#http
-	// Note: HTTP01ChallengePath returns the URL path to fulfill this challenge
-	HTTP01 = Challenge("http-01")
-	// TLSSNI01 is the "tls-sni-01" ACME challenge https://github.com/ietf-wg-acme/acme/blob/master/draft-ietf-acme-acme.md#tls-with-server-name-indication-tls-sni
-	// Note: TLSSNI01ChallengeCert returns a certificate to fulfill this challenge
-	TLSSNI01 = Challenge("tls-sni-01")
-	// DNS01 is the "dns-01" ACME challenge https://github.com/ietf-wg-acme/acme/blob/master/draft-ietf-acme-acme.md#dns
-	// Note: DNS01Record returns a DNS record which will fulfill this challenge
-	DNS01 = Challenge("dns-01")
-)
diff --git a/vendor/github.com/xenolf/lego/acme/client.go b/vendor/github.com/xenolf/lego/acme/client.go
deleted file mode 100644
index 445dc2bd..00000000
--- a/vendor/github.com/xenolf/lego/acme/client.go
+++ /dev/null
@@ -1,702 +0,0 @@
-// Package acme implements the ACME protocol for Let's Encrypt and other conforming providers.
-package acme
-
-import (
-	"crypto"
-	"crypto/x509"
-	"encoding/base64"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"io/ioutil"
-	"log"
-	"net"
-	"regexp"
-	"strconv"
-	"strings"
-	"time"
-)
-
-var (
-	// Logger is an optional custom logger.
-	Logger *log.Logger
-)
-
-// logf writes a log entry. It uses Logger if not
-// nil, otherwise it uses the default log.Logger.
-func logf(format string, args ...interface{}) {
-	if Logger != nil {
-		Logger.Printf(format, args...)
-	} else {
-		log.Printf(format, args...)
-	}
-}
-
-// User interface is to be implemented by users of this library.
-// It is used by the client type to get user specific information.
-type User interface {
-	GetEmail() string
-	GetRegistration() *RegistrationResource
-	GetPrivateKey() crypto.PrivateKey
-}
-
-// Interface for all challenge solvers to implement.
-type solver interface {
-	Solve(challenge challenge, domain string) error
-}
-
-type validateFunc func(j *jws, domain, uri string, chlng challenge) error
-
-// Client is the user-friendly way to ACME
-type Client struct {
-	directory  directory
-	user       User
-	jws        *jws
-	keyType    KeyType
-	issuerCert []byte
-	solvers    map[Challenge]solver
-}
-
-// NewClient creates a new ACME client on behalf of the user. The client will depend on
-// the ACME directory located at caDirURL for the rest of its actions. It will
-// generate private keys for certificates of the given keyType.
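
The `logf` helper in the deleted `client.go` above prefers the package-level `Logger` when it is set and falls back to the standard logger otherwise, so callers can redirect all client output in one place. A minimal sketch of that hook, assuming nothing beyond what the hunk shows:

```go
package main

import (
	"log"
	"os"

	"github.com/xenolf/lego/acme"
)

func main() {
	// Every log line in the acme package goes through logf, which uses
	// acme.Logger when non-nil; installing one here captures all of it.
	acme.Logger = log.New(os.Stderr, "[acme] ", log.LstdFlags)
}
```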
-func NewClient(caDirURL string, user User, keyType KeyType) (*Client, error) {
-	privKey := user.GetPrivateKey()
-	if privKey == nil {
-		return nil, errors.New("private key was nil")
-	}
-
-	var dir directory
-	if _, err := getJSON(caDirURL, &dir); err != nil {
-		return nil, fmt.Errorf("get directory at '%s': %v", caDirURL, err)
-	}
-
-	if dir.NewRegURL == "" {
-		return nil, errors.New("directory missing new registration URL")
-	}
-	if dir.NewAuthzURL == "" {
-		return nil, errors.New("directory missing new authz URL")
-	}
-	if dir.NewCertURL == "" {
-		return nil, errors.New("directory missing new certificate URL")
-	}
-	if dir.RevokeCertURL == "" {
-		return nil, errors.New("directory missing revoke certificate URL")
-	}
-
-	jws := &jws{privKey: privKey, directoryURL: caDirURL}
-
-	// REVIEW: best possibility?
-	// Add all available solvers with the right index as per ACME
-	// spec to this map. Otherwise they won't be found.
-	solvers := make(map[Challenge]solver)
-	solvers[HTTP01] = &httpChallenge{jws: jws, validate: validate, provider: &HTTPProviderServer{}}
-	solvers[TLSSNI01] = &tlsSNIChallenge{jws: jws, validate: validate, provider: &TLSProviderServer{}}
-
-	return &Client{directory: dir, user: user, jws: jws, keyType: keyType, solvers: solvers}, nil
-}
-
-// SetChallengeProvider specifies a custom provider that will make the solution available
-func (c *Client) SetChallengeProvider(challenge Challenge, p ChallengeProvider) error {
-	switch challenge {
-	case HTTP01:
-		c.solvers[challenge] = &httpChallenge{jws: c.jws, validate: validate, provider: p}
-	case TLSSNI01:
-		c.solvers[challenge] = &tlsSNIChallenge{jws: c.jws, validate: validate, provider: p}
-	case DNS01:
-		c.solvers[challenge] = &dnsChallenge{jws: c.jws, validate: validate, provider: p}
-	default:
-		return fmt.Errorf("Unknown challenge %v", challenge)
-	}
-	return nil
-}
-
-// SetHTTPAddress specifies a custom interface:port to be used for HTTP based challenges.
-// If this option is not used, the default port 80 and all interfaces will be used.
-// To only specify a port and no interface use the ":port" notation.
-func (c *Client) SetHTTPAddress(iface string) error {
-	host, port, err := net.SplitHostPort(iface)
-	if err != nil {
-		return err
-	}
-
-	if chlng, ok := c.solvers[HTTP01]; ok {
-		chlng.(*httpChallenge).provider = NewHTTPProviderServer(host, port)
-	}
-
-	return nil
-}
-
-// SetTLSAddress specifies a custom interface:port to be used for TLS based challenges.
-// If this option is not used, the default port 443 and all interfaces will be used.
-// To only specify a port and no interface use the ":port" notation.
-func (c *Client) SetTLSAddress(iface string) error {
-	host, port, err := net.SplitHostPort(iface)
-	if err != nil {
-		return err
-	}
-
-	if chlng, ok := c.solvers[TLSSNI01]; ok {
-		chlng.(*tlsSNIChallenge).provider = NewTLSProviderServer(host, port)
-	}
-	return nil
-}
-
-// ExcludeChallenges explicitly removes challenges from the pool for solving.
-func (c *Client) ExcludeChallenges(challenges []Challenge) {
-	// Loop through all challenges and delete the requested one if found.
-	for _, challenge := range challenges {
-		delete(c.solvers, challenge)
-	}
-}
-
-// Register the current account with the ACME server.
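
`SetChallengeProvider` above is the hook for the custom challenge solvers mentioned in the README. A hedged sketch; the `Present`/`CleanUp` method set is an assumption about the package's `ChallengeProvider` interface, which is defined in a file this patch hunk does not show:

```go
package dnsprovider // hypothetical illustration package

import (
	"log"

	"github.com/xenolf/lego/acme"
)

// manualDNS is a toy provider that only logs the TXT record it would
// publish; a real provider would call a DNS API and wait for propagation.
type manualDNS struct{}

func (manualDNS) Present(domain, token, keyAuth string) error {
	log.Printf("publish TXT record for _acme-challenge.%s", domain)
	return nil
}

func (manualDNS) CleanUp(domain, token, keyAuth string) error {
	log.Printf("remove TXT record for _acme-challenge.%s", domain)
	return nil
}

// wireUp registers the provider for dns-01; per the README, choosing DNS
// means the HTTP and TLS-SNI solvers are typically excluded.
func wireUp(client *acme.Client) error {
	return client.SetChallengeProvider(acme.DNS01, manualDNS{})
}
```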
-func (c *Client) Register() (*RegistrationResource, error) { - if c == nil || c.user == nil { - return nil, errors.New("acme: cannot register a nil client or user") - } - logf("[INFO] acme: Registering account for %s", c.user.GetEmail()) - - regMsg := registrationMessage{ - Resource: "new-reg", - } - if c.user.GetEmail() != "" { - regMsg.Contact = []string{"mailto:" + c.user.GetEmail()} - } else { - regMsg.Contact = []string{} - } - - var serverReg Registration - hdr, err := postJSON(c.jws, c.directory.NewRegURL, regMsg, &serverReg) - if err != nil { - return nil, err - } - - reg := &RegistrationResource{Body: serverReg} - - links := parseLinks(hdr["Link"]) - reg.URI = hdr.Get("Location") - if links["terms-of-service"] != "" { - reg.TosURL = links["terms-of-service"] - } - - if links["next"] != "" { - reg.NewAuthzURL = links["next"] - } else { - return nil, errors.New("acme: The server did not return 'next' link to proceed") - } - - return reg, nil -} - -// DeleteRegistration deletes the client's user registration from the ACME -// server. -func (c *Client) DeleteRegistration() error { - if c == nil || c.user == nil { - return errors.New("acme: cannot unregister a nil client or user") - } - logf("[INFO] acme: Deleting account for %s", c.user.GetEmail()) - - regMsg := registrationMessage{ - Resource: "reg", - Delete: true, - } - - _, err := postJSON(c.jws, c.user.GetRegistration().URI, regMsg, nil) - if err != nil { - return err - } - - return nil -} - -// QueryRegistration runs a POST request on the client's registration and -// returns the result. -// -// This is similar to the Register function, but acting on an existing -// registration link and resource. -func (c *Client) QueryRegistration() (*RegistrationResource, error) { - if c == nil || c.user == nil { - return nil, errors.New("acme: cannot query the registration of a nil client or user") - } - // Log the URL here instead of the email as the email may not be set - logf("[INFO] acme: Querying account for %s", c.user.GetRegistration().URI) - - regMsg := registrationMessage{ - Resource: "reg", - } - - var serverReg Registration - hdr, err := postJSON(c.jws, c.user.GetRegistration().URI, regMsg, &serverReg) - if err != nil { - return nil, err - } - - reg := &RegistrationResource{Body: serverReg} - - links := parseLinks(hdr["Link"]) - // Location: header is not returned so this needs to be populated off of - // existing URI - reg.URI = c.user.GetRegistration().URI - if links["terms-of-service"] != "" { - reg.TosURL = links["terms-of-service"] - } - - if links["next"] != "" { - reg.NewAuthzURL = links["next"] - } else { - return nil, errors.New("acme: No new-authz link in response to registration query") - } - - return reg, nil -} - -// AgreeToTOS updates the Client registration and sends the agreement to -// the server. -func (c *Client) AgreeToTOS() error { - reg := c.user.GetRegistration() - - reg.Body.Agreement = c.user.GetRegistration().TosURL - reg.Body.Resource = "reg" - _, err := postJSON(c.jws, c.user.GetRegistration().URI, c.user.GetRegistration().Body, nil) - return err -} - -// ObtainCertificate tries to obtain a single certificate using all domains passed into it. -// The first domain in domains is used for the CommonName field of the certificate, all other -// domains are added using the Subject Alternate Names extension. A new private key is generated -// for every invocation of this function. If you do not want that you can supply your own private key -// in the privKey parameter. 
If this parameter is non-nil it will be used instead of generating a new one.
-// If bundle is true, the []byte contains both the issuer certificate and
-// your issued certificate as a bundle.
-// This function will never return a partial certificate. If one domain in the list fails,
-// the whole certificate will fail.
-func (c *Client) ObtainCertificate(domains []string, bundle bool, privKey crypto.PrivateKey) (CertificateResource, map[string]error) {
-	if bundle {
-		logf("[INFO][%s] acme: Obtaining bundled SAN certificate", strings.Join(domains, ", "))
-	} else {
-		logf("[INFO][%s] acme: Obtaining SAN certificate", strings.Join(domains, ", "))
-	}
-
-	challenges, failures := c.getChallenges(domains)
-	// If any challenge fails - return. Do not generate partial SAN certificates.
-	if len(failures) > 0 {
-		return CertificateResource{}, failures
-	}
-
-	errs := c.solveChallenges(challenges)
-	// If any challenge fails - return. Do not generate partial SAN certificates.
-	if len(errs) > 0 {
-		return CertificateResource{}, errs
-	}
-
-	logf("[INFO][%s] acme: Validations succeeded; requesting certificates", strings.Join(domains, ", "))
-
-	cert, err := c.requestCertificate(challenges, bundle, privKey)
-	if err != nil {
-		for _, chln := range challenges {
-			failures[chln.Domain] = err
-		}
-	}
-
-	return cert, failures
-}
-
-// RevokeCertificate takes a PEM encoded certificate or bundle and tries to revoke it at the CA.
-func (c *Client) RevokeCertificate(certificate []byte) error {
-	certificates, err := parsePEMBundle(certificate)
-	if err != nil {
-		return err
-	}
-
-	x509Cert := certificates[0]
-	if x509Cert.IsCA {
-		return fmt.Errorf("Certificate bundle starts with a CA certificate")
-	}
-
-	encodedCert := base64.URLEncoding.EncodeToString(x509Cert.Raw)
-
-	_, err = postJSON(c.jws, c.directory.RevokeCertURL, revokeCertMessage{Resource: "revoke-cert", Certificate: encodedCert}, nil)
-	return err
-}
-
-// RenewCertificate takes a CertificateResource and tries to renew the certificate.
-// If the renewal process succeeds, the new certificate will be returned in a new CertificateResource.
-// Please be aware that this function will return a new certificate in ANY case that is not an error.
-// If the server does not provide us with a new cert on a GET request to the CertURL
-// this function will start a new-cert flow where a new certificate gets generated.
-// If bundle is true, the []byte contains both the issuer certificate and
-// your issued certificate as a bundle.
-// For private key reuse the PrivateKey property of the passed in CertificateResource should be non-nil.
-func (c *Client) RenewCertificate(cert CertificateResource, bundle bool) (CertificateResource, error) {
-	// Input certificate is PEM encoded. Decode it here as we may need the decoded
-	// cert later on in the renewal process. The input may be a bundle or a single certificate.
-	certificates, err := parsePEMBundle(cert.Certificate)
-	if err != nil {
-		return CertificateResource{}, err
-	}
-
-	x509Cert := certificates[0]
-	if x509Cert.IsCA {
-		return CertificateResource{}, fmt.Errorf("[%s] Certificate bundle starts with a CA certificate", cert.Domain)
-	}
-
-	// This is just meant to be informational for the user.
-	timeLeft := x509Cert.NotAfter.Sub(time.Now().UTC())
-	logf("[INFO][%s] acme: Trying renewal with %d hours remaining", cert.Domain, int(timeLeft.Hours()))
-
-	// The first step of renewal is to check if we get a renewed cert
-	// directly from the cert URL.
- resp, err := httpGet(cert.CertURL) - if err != nil { - return CertificateResource{}, err - } - defer resp.Body.Close() - serverCertBytes, err := ioutil.ReadAll(resp.Body) - if err != nil { - return CertificateResource{}, err - } - - serverCert, err := x509.ParseCertificate(serverCertBytes) - if err != nil { - return CertificateResource{}, err - } - - // If the server responds with a different certificate we are effectively renewed. - // TODO: Further test if we can actually use the new certificate (Our private key works) - if !x509Cert.Equal(serverCert) { - logf("[INFO][%s] acme: Server responded with renewed certificate", cert.Domain) - issuedCert := pemEncode(derCertificateBytes(serverCertBytes)) - // If bundle is true, we want to return a certificate bundle. - // To do this, we need the issuer certificate. - if bundle { - // The issuer certificate link is always supplied via an "up" link - // in the response headers of a new certificate. - links := parseLinks(resp.Header["Link"]) - issuerCert, err := c.getIssuerCertificate(links["up"]) - if err != nil { - // If we fail to acquire the issuer cert, return the issued certificate - do not fail. - logf("[ERROR][%s] acme: Could not bundle issuer certificate: %v", cert.Domain, err) - } else { - // Success - append the issuer cert to the issued cert. - issuerCert = pemEncode(derCertificateBytes(issuerCert)) - issuedCert = append(issuedCert, issuerCert...) - } - } - - cert.Certificate = issuedCert - return cert, nil - } - - var privKey crypto.PrivateKey - if cert.PrivateKey != nil { - privKey, err = parsePEMPrivateKey(cert.PrivateKey) - if err != nil { - return CertificateResource{}, err - } - } - - var domains []string - var failures map[string]error - // check for SAN certificate - if len(x509Cert.DNSNames) > 1 { - domains = append(domains, x509Cert.Subject.CommonName) - for _, sanDomain := range x509Cert.DNSNames { - if sanDomain == x509Cert.Subject.CommonName { - continue - } - domains = append(domains, sanDomain) - } - } else { - domains = append(domains, x509Cert.Subject.CommonName) - } - - newCert, failures := c.ObtainCertificate(domains, bundle, privKey) - return newCert, failures[cert.Domain] -} - -// Looks through the challenge combinations to find a solvable match. -// Then solves the challenges in series and returns. -func (c *Client) solveChallenges(challenges []authorizationResource) map[string]error { - // loop through the resources, basically through the domains. - failures := make(map[string]error) - for _, authz := range challenges { - // no solvers - no solving - if solvers := c.chooseSolvers(authz.Body, authz.Domain); solvers != nil { - for i, solver := range solvers { - // TODO: do not immediately fail if one domain fails to validate. - err := solver.Solve(authz.Body.Challenges[i], authz.Domain) - if err != nil { - failures[authz.Domain] = err - } - } - } else { - failures[authz.Domain] = fmt.Errorf("[%s] acme: Could not determine solvers", authz.Domain) - } - } - - return failures -} - -// Checks all combinations from the server and returns an array of -// solvers which should get executed in series. 
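
`chooseSolvers` below implements the selection rule just described: take the first challenge combination for which every index has a registered solver. The rule in isolation, as a simplified sketch with stand-in types:

```go
// pickCombination returns the first combination whose every challenge
// index is solvable, or nil when none can be satisfied in full.
// [][]int and map[int]bool are simplified stand-ins for the real types.
func pickCombination(combinations [][]int, solvable map[int]bool) []int {
	for _, combo := range combinations {
		ok := true
		for _, idx := range combo {
			if !solvable[idx] {
				ok = false
				break
			}
		}
		if ok {
			return combo
		}
	}
	return nil
}
```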
-func (c *Client) chooseSolvers(auth authorization, domain string) map[int]solver {
-	for _, combination := range auth.Combinations {
-		solvers := make(map[int]solver)
-		for _, idx := range combination {
-			if solver, ok := c.solvers[auth.Challenges[idx].Type]; ok {
-				solvers[idx] = solver
-			} else {
-				logf("[INFO][%s] acme: Could not find solver for: %s", domain, auth.Challenges[idx].Type)
-			}
-		}
-
-		// If we can solve the whole combination, return the solvers
-		if len(solvers) == len(combination) {
-			return solvers
-		}
-	}
-	return nil
-}
-
-// Get the challenges needed to prove our identifier to the ACME server.
-func (c *Client) getChallenges(domains []string) ([]authorizationResource, map[string]error) {
-	resc, errc := make(chan authorizationResource), make(chan domainError)
-
-	for _, domain := range domains {
-		go func(domain string) {
-			authMsg := authorization{Resource: "new-authz", Identifier: identifier{Type: "dns", Value: domain}}
-			var authz authorization
-			hdr, err := postJSON(c.jws, c.user.GetRegistration().NewAuthzURL, authMsg, &authz)
-			if err != nil {
-				errc <- domainError{Domain: domain, Error: err}
-				return
-			}
-
-			links := parseLinks(hdr["Link"])
-			if links["next"] == "" {
-				logf("[ERROR][%s] acme: Server did not provide next link to proceed", domain)
-				return
-			}
-
-			resc <- authorizationResource{Body: authz, NewCertURL: links["next"], AuthURL: hdr.Get("Location"), Domain: domain}
-		}(domain)
-	}
-
-	responses := make(map[string]authorizationResource)
-	failures := make(map[string]error)
-	for i := 0; i < len(domains); i++ {
-		select {
-		case res := <-resc:
-			responses[res.Domain] = res
-		case err := <-errc:
-			failures[err.Domain] = err.Error
-		}
-	}
-
-	challenges := make([]authorizationResource, 0, len(responses))
-	for _, domain := range domains {
-		if challenge, ok := responses[domain]; ok {
-			challenges = append(challenges, challenge)
-		}
-	}
-
-	close(resc)
-	close(errc)
-
-	return challenges, failures
-}
-
-func (c *Client) requestCertificate(authz []authorizationResource, bundle bool, privKey crypto.PrivateKey) (CertificateResource, error) {
-	if len(authz) == 0 {
-		return CertificateResource{}, errors.New("Passed no authorizations to requestCertificate!")
-	}
-
-	commonName := authz[0]
-	var err error
-	if privKey == nil {
-		privKey, err = generatePrivateKey(c.keyType)
-		if err != nil {
-			return CertificateResource{}, err
-		}
-	}
-
-	var san []string
-	var authURLs []string
-	for _, auth := range authz[1:] {
-		san = append(san, auth.Domain)
-		authURLs = append(authURLs, auth.AuthURL)
-	}
-
-	// TODO: should the CSR be customizable?
- csr, err := generateCsr(privKey, commonName.Domain, san) - if err != nil { - return CertificateResource{}, err - } - - csrString := base64.URLEncoding.EncodeToString(csr) - jsonBytes, err := json.Marshal(csrMessage{Resource: "new-cert", Csr: csrString, Authorizations: authURLs}) - if err != nil { - return CertificateResource{}, err - } - - resp, err := c.jws.post(commonName.NewCertURL, jsonBytes) - if err != nil { - return CertificateResource{}, err - } - - privateKeyPem := pemEncode(privKey) - cerRes := CertificateResource{ - Domain: commonName.Domain, - CertURL: resp.Header.Get("Location"), - PrivateKey: privateKeyPem} - - for { - switch resp.StatusCode { - case 201, 202: - cert, err := ioutil.ReadAll(limitReader(resp.Body, 1024*1024)) - resp.Body.Close() - if err != nil { - return CertificateResource{}, err - } - - // The server returns a body with a length of zero if the - // certificate was not ready at the time this request completed. - // Otherwise the body is the certificate. - if len(cert) > 0 { - - cerRes.CertStableURL = resp.Header.Get("Content-Location") - cerRes.AccountRef = c.user.GetRegistration().URI - - issuedCert := pemEncode(derCertificateBytes(cert)) - // If bundle is true, we want to return a certificate bundle. - // To do this, we need the issuer certificate. - if bundle { - // The issuer certificate link is always supplied via an "up" link - // in the response headers of a new certificate. - links := parseLinks(resp.Header["Link"]) - issuerCert, err := c.getIssuerCertificate(links["up"]) - if err != nil { - // If we fail to acquire the issuer cert, return the issued certificate - do not fail. - logf("[WARNING][%s] acme: Could not bundle issuer certificate: %v", commonName.Domain, err) - } else { - // Success - append the issuer cert to the issued cert. - issuerCert = pemEncode(derCertificateBytes(issuerCert)) - issuedCert = append(issuedCert, issuerCert...) - } - } - - cerRes.Certificate = issuedCert - logf("[INFO][%s] Server responded with a certificate.", commonName.Domain) - return cerRes, nil - } - - // The certificate was granted but is not yet issued. - // Check retry-after and loop. - ra := resp.Header.Get("Retry-After") - retryAfter, err := strconv.Atoi(ra) - if err != nil { - return CertificateResource{}, err - } - - logf("[INFO][%s] acme: Server responded with status 202; retrying after %ds", commonName.Domain, retryAfter) - time.Sleep(time.Duration(retryAfter) * time.Second) - - break - default: - return CertificateResource{}, handleHTTPError(resp) - } - - resp, err = httpGet(cerRes.CertURL) - if err != nil { - return CertificateResource{}, err - } - } -} - -// getIssuerCertificate requests the issuer certificate and caches it for -// subsequent requests. 
-func (c *Client) getIssuerCertificate(url string) ([]byte, error) { - logf("[INFO] acme: Requesting issuer cert from %s", url) - if c.issuerCert != nil { - return c.issuerCert, nil - } - - resp, err := httpGet(url) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - issuerBytes, err := ioutil.ReadAll(limitReader(resp.Body, 1024*1024)) - if err != nil { - return nil, err - } - - _, err = x509.ParseCertificate(issuerBytes) - if err != nil { - return nil, err - } - - c.issuerCert = issuerBytes - return issuerBytes, err -} - -func parseLinks(links []string) map[string]string { - aBrkt := regexp.MustCompile("[<>]") - slver := regexp.MustCompile("(.+) *= *\"(.+)\"") - linkMap := make(map[string]string) - - for _, link := range links { - - link = aBrkt.ReplaceAllString(link, "") - parts := strings.Split(link, ";") - - matches := slver.FindStringSubmatch(parts[1]) - if len(matches) > 0 { - linkMap[matches[2]] = parts[0] - } - } - - return linkMap -} - -// validate makes the ACME server start validating a -// challenge response, only returning once it is done. -func validate(j *jws, domain, uri string, chlng challenge) error { - var challengeResponse challenge - - hdr, err := postJSON(j, uri, chlng, &challengeResponse) - if err != nil { - return err - } - - // After the path is sent, the ACME server will access our server. - // Repeatedly check the server for an updated status on our request. - for { - switch challengeResponse.Status { - case "valid": - logf("[INFO][%s] The server validated our request", domain) - return nil - case "pending": - break - case "invalid": - return handleChallengeError(challengeResponse) - default: - return errors.New("The server returned an unexpected state.") - } - - ra, err := strconv.Atoi(hdr.Get("Retry-After")) - if err != nil { - // The ACME server MUST return a Retry-After. - // If it doesn't, we'll just poll hard. - ra = 1 - } - time.Sleep(time.Duration(ra) * time.Second) - - hdr, err = getJSON(uri, &challengeResponse) - if err != nil { - return err - } - } -} diff --git a/vendor/github.com/xenolf/lego/acme/crypto.go b/vendor/github.com/xenolf/lego/acme/crypto.go deleted file mode 100644 index fc20442f..00000000 --- a/vendor/github.com/xenolf/lego/acme/crypto.go +++ /dev/null @@ -1,323 +0,0 @@ -package acme - -import ( - "bytes" - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "crypto/x509/pkix" - "encoding/base64" - "encoding/pem" - "errors" - "fmt" - "io" - "io/ioutil" - "math/big" - "net/http" - "strings" - "time" - - "golang.org/x/crypto/ocsp" -) - -// KeyType represents the key algo as well as the key size or curve to use. -type KeyType string -type derCertificateBytes []byte - -// Constants for all key types we support. -const ( - EC256 = KeyType("P256") - EC384 = KeyType("P384") - RSA2048 = KeyType("2048") - RSA4096 = KeyType("4096") - RSA8192 = KeyType("8192") -) - -const ( - // OCSPGood means that the certificate is valid. - OCSPGood = ocsp.Good - // OCSPRevoked means that the certificate has been deliberately revoked. - OCSPRevoked = ocsp.Revoked - // OCSPUnknown means that the OCSP responder doesn't know about the certificate. - OCSPUnknown = ocsp.Unknown - // OCSPServerFailed means that the OCSP responder failed to process the request. - OCSPServerFailed = ocsp.ServerFailed -) - -// GetOCSPForCert takes a PEM encoded cert or cert bundle returning the raw OCSP response, -// the parsed response, and an error, if any. 
The returned []byte can be passed directly
-// into the OCSPStaple property of a tls.Certificate. If the bundle only contains the
-// issued certificate, this function will try to get the issuer certificate from the
-// IssuingCertificateURL in the certificate. If the []byte and/or ocsp.Response return
-// values are nil, the OCSP status may be assumed OCSPUnknown.
-func GetOCSPForCert(bundle []byte) ([]byte, *ocsp.Response, error) {
-	certificates, err := parsePEMBundle(bundle)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	// We expect the certificate slice to be ordered downwards the chain.
-	// SRV CRT -> CA. We need to pull the leaf and issuer certs out of it,
-	// which should always be the first two certificates. If there's no
-	// OCSP server listed in the leaf cert, there's nothing to do. And if
-	// we have only one certificate so far, we need to get the issuer cert.
-	issuedCert := certificates[0]
-	if len(issuedCert.OCSPServer) == 0 {
-		return nil, nil, errors.New("no OCSP server specified in cert")
-	}
-	if len(certificates) == 1 {
-		// TODO: build fallback. If this fails, check the remaining array entries.
-		if len(issuedCert.IssuingCertificateURL) == 0 {
-			return nil, nil, errors.New("no issuing certificate URL")
-		}
-
-		resp, err := httpGet(issuedCert.IssuingCertificateURL[0])
-		if err != nil {
-			return nil, nil, err
-		}
-		defer resp.Body.Close()
-
-		issuerBytes, err := ioutil.ReadAll(limitReader(resp.Body, 1024*1024))
-		if err != nil {
-			return nil, nil, err
-		}
-
-		issuerCert, err := x509.ParseCertificate(issuerBytes)
-		if err != nil {
-			return nil, nil, err
-		}
-
-		// Append the issuer cert so the slice stays ordered SRV CRT -> CA.
-		certificates = append(certificates, issuerCert)
-	}
-	issuerCert := certificates[1]
-
-	// Finally kick off the OCSP request.
-	ocspReq, err := ocsp.CreateRequest(issuedCert, issuerCert, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	reader := bytes.NewReader(ocspReq)
-	resp, err := httpPost(issuedCert.OCSPServer[0], "application/ocsp-request", reader)
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	ocspResBytes, err := ioutil.ReadAll(limitReader(resp.Body, 1024*1024))
-	if err != nil {
-		return nil, nil, err
-	}
-	ocspRes, err := ocsp.ParseResponse(ocspResBytes, issuerCert)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	if ocspRes.Certificate == nil {
-		err = ocspRes.CheckSignatureFrom(issuerCert)
-		if err != nil {
-			return nil, nil, err
-		}
-	}
-
-	return ocspResBytes, ocspRes, nil
-}
-
-func getKeyAuthorization(token string, key interface{}) (string, error) {
-	var publicKey crypto.PublicKey
-	switch k := key.(type) {
-	case *ecdsa.PrivateKey:
-		publicKey = k.Public()
-	case *rsa.PrivateKey:
-		publicKey = k.Public()
-	}
-
-	// Generate the Key Authorization for the challenge
-	jwk := keyAsJWK(publicKey)
-	if jwk == nil {
-		return "", errors.New("Could not generate JWK from key.")
-	}
-	thumbBytes, err := jwk.Thumbprint(crypto.SHA256)
-	if err != nil {
-		return "", err
-	}
-
-	// unpad the base64URL
-	keyThumb := base64.URLEncoding.EncodeToString(thumbBytes)
-	index := strings.Index(keyThumb, "=")
-	if index != -1 {
-		keyThumb = keyThumb[:index]
-	}
-
-	return token + "." + keyThumb, nil
-}
-
-// parsePEMBundle parses a certificate bundle from top to bottom and returns
-// a slice of x509 certificates. This function will error if no certificates are found.
-func parsePEMBundle(bundle []byte) ([]*x509.Certificate, error) { - var certificates []*x509.Certificate - var certDERBlock *pem.Block - - for { - certDERBlock, bundle = pem.Decode(bundle) - if certDERBlock == nil { - break - } - - if certDERBlock.Type == "CERTIFICATE" { - cert, err := x509.ParseCertificate(certDERBlock.Bytes) - if err != nil { - return nil, err - } - certificates = append(certificates, cert) - } - } - - if len(certificates) == 0 { - return nil, errors.New("No certificates were found while parsing the bundle.") - } - - return certificates, nil -} - -func parsePEMPrivateKey(key []byte) (crypto.PrivateKey, error) { - keyBlock, _ := pem.Decode(key) - - switch keyBlock.Type { - case "RSA PRIVATE KEY": - return x509.ParsePKCS1PrivateKey(keyBlock.Bytes) - case "EC PRIVATE KEY": - return x509.ParseECPrivateKey(keyBlock.Bytes) - default: - return nil, errors.New("Unknown PEM header value") - } -} - -func generatePrivateKey(keyType KeyType) (crypto.PrivateKey, error) { - - switch keyType { - case EC256: - return ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - case EC384: - return ecdsa.GenerateKey(elliptic.P384(), rand.Reader) - case RSA2048: - return rsa.GenerateKey(rand.Reader, 2048) - case RSA4096: - return rsa.GenerateKey(rand.Reader, 4096) - case RSA8192: - return rsa.GenerateKey(rand.Reader, 8192) - } - - return nil, fmt.Errorf("Invalid KeyType: %s", keyType) -} - -func generateCsr(privateKey crypto.PrivateKey, domain string, san []string) ([]byte, error) { - template := x509.CertificateRequest{ - Subject: pkix.Name{ - CommonName: domain, - }, - } - - if len(san) > 0 { - template.DNSNames = san - } - - return x509.CreateCertificateRequest(rand.Reader, &template, privateKey) -} - -func pemEncode(data interface{}) []byte { - var pemBlock *pem.Block - switch key := data.(type) { - case *ecdsa.PrivateKey: - keyBytes, _ := x509.MarshalECPrivateKey(key) - pemBlock = &pem.Block{Type: "EC PRIVATE KEY", Bytes: keyBytes} - case *rsa.PrivateKey: - pemBlock = &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)} - break - case derCertificateBytes: - pemBlock = &pem.Block{Type: "CERTIFICATE", Bytes: []byte(data.(derCertificateBytes))} - } - - return pem.EncodeToMemory(pemBlock) -} - -func pemDecode(data []byte) (*pem.Block, error) { - pemBlock, _ := pem.Decode(data) - if pemBlock == nil { - return nil, fmt.Errorf("Pem decode did not yield a valid block. Is the certificate in the right format?") - } - - return pemBlock, nil -} - -func pemDecodeTox509(pem []byte) (*x509.Certificate, error) { - pemBlock, err := pemDecode(pem) - if pemBlock == nil { - return nil, err - } - - return x509.ParseCertificate(pemBlock.Bytes) -} - -// GetPEMCertExpiration returns the "NotAfter" date of a PEM encoded certificate. -// The certificate has to be PEM encoded. Any other encodings like DER will fail. -func GetPEMCertExpiration(cert []byte) (time.Time, error) { - pemBlock, err := pemDecode(cert) - if pemBlock == nil { - return time.Time{}, err - } - - return getCertExpiration(pemBlock.Bytes) -} - -// getCertExpiration returns the "NotAfter" date of a DER encoded certificate. 
-func getCertExpiration(cert []byte) (time.Time, error) {
-	pCert, err := x509.ParseCertificate(cert)
-	if err != nil {
-		return time.Time{}, err
-	}
-
-	return pCert.NotAfter, nil
-}
-
-func generatePemCert(privKey *rsa.PrivateKey, domain string) ([]byte, error) {
-	derBytes, err := generateDerCert(privKey, time.Time{}, domain)
-	if err != nil {
-		return nil, err
-	}
-
-	return pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: derBytes}), nil
-}
-
-func generateDerCert(privKey *rsa.PrivateKey, expiration time.Time, domain string) ([]byte, error) {
-	serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
-	serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
-	if err != nil {
-		return nil, err
-	}
-
-	if expiration.IsZero() {
-		expiration = time.Now().Add(365 * 24 * time.Hour)
-	}
-
-	template := x509.Certificate{
-		SerialNumber: serialNumber,
-		Subject: pkix.Name{
-			CommonName: "ACME Challenge TEMP",
-		},
-		NotBefore: time.Now(),
-		NotAfter:  expiration,
-
-		KeyUsage:              x509.KeyUsageKeyEncipherment,
-		BasicConstraintsValid: true,
-		DNSNames:              []string{domain},
-	}
-
-	return x509.CreateCertificate(rand.Reader, &template, &template, &privKey.PublicKey, privKey)
-}
-
-func limitReader(rd io.ReadCloser, numBytes int64) io.ReadCloser {
-	return http.MaxBytesReader(nil, rd, numBytes)
-}
diff --git a/vendor/github.com/xenolf/lego/acme/dns_challenge.go b/vendor/github.com/xenolf/lego/acme/dns_challenge.go
deleted file mode 100644
index 2f45e2a9..00000000
--- a/vendor/github.com/xenolf/lego/acme/dns_challenge.go
+++ /dev/null
@@ -1,279 +0,0 @@
-package acme
-
-import (
-	"crypto/sha256"
-	"encoding/base64"
-	"errors"
-	"fmt"
-	"log"
-	"net"
-	"strings"
-	"time"
-
-	"github.com/miekg/dns"
-	"golang.org/x/net/publicsuffix"
-)
-
-type preCheckDNSFunc func(fqdn, value string) (bool, error)
-
-var (
-	preCheckDNS preCheckDNSFunc = checkDNSPropagation
-	fqdnToZone                  = map[string]string{}
-)
-
-var RecursiveNameservers = []string{
-	"google-public-dns-a.google.com:53",
-	"google-public-dns-b.google.com:53",
-}
-
-// DNSTimeout is used to override the default DNS timeout of 10 seconds.
-var DNSTimeout = 10 * time.Second - -// DNS01Record returns a DNS record which will fulfill the `dns-01` challenge -func DNS01Record(domain, keyAuth string) (fqdn string, value string, ttl int) { - keyAuthShaBytes := sha256.Sum256([]byte(keyAuth)) - // base64URL encoding without padding - keyAuthSha := base64.URLEncoding.EncodeToString(keyAuthShaBytes[:sha256.Size]) - value = strings.TrimRight(keyAuthSha, "=") - ttl = 120 - fqdn = fmt.Sprintf("_acme-challenge.%s.", domain) - return -} - -// dnsChallenge implements the dns-01 challenge according to ACME 7.5 -type dnsChallenge struct { - jws *jws - validate validateFunc - provider ChallengeProvider -} - -func (s *dnsChallenge) Solve(chlng challenge, domain string) error { - logf("[INFO][%s] acme: Trying to solve DNS-01", domain) - - if s.provider == nil { - return errors.New("No DNS Provider configured") - } - - // Generate the Key Authorization for the challenge - keyAuth, err := getKeyAuthorization(chlng.Token, s.jws.privKey) - if err != nil { - return err - } - - err = s.provider.Present(domain, chlng.Token, keyAuth) - if err != nil { - return fmt.Errorf("Error presenting token: %s", err) - } - defer func() { - err := s.provider.CleanUp(domain, chlng.Token, keyAuth) - if err != nil { - log.Printf("Error cleaning up %s: %v ", domain, err) - } - }() - - fqdn, value, _ := DNS01Record(domain, keyAuth) - - logf("[INFO][%s] Checking DNS record propagation...", domain) - - var timeout, interval time.Duration - switch provider := s.provider.(type) { - case ChallengeProviderTimeout: - timeout, interval = provider.Timeout() - default: - timeout, interval = 60*time.Second, 2*time.Second - } - - err = WaitFor(timeout, interval, func() (bool, error) { - return preCheckDNS(fqdn, value) - }) - if err != nil { - return err - } - - return s.validate(s.jws, domain, chlng.URI, challenge{Resource: "challenge", Type: chlng.Type, Token: chlng.Token, KeyAuthorization: keyAuth}) -} - -// checkDNSPropagation checks if the expected TXT record has been propagated to all authoritative nameservers. -func checkDNSPropagation(fqdn, value string) (bool, error) { - // Initial attempt to resolve at the recursive NS - r, err := dnsQuery(fqdn, dns.TypeTXT, RecursiveNameservers, true) - if err != nil { - return false, err - } - if r.Rcode == dns.RcodeSuccess { - // If we see a CNAME here then use the alias - for _, rr := range r.Answer { - if cn, ok := rr.(*dns.CNAME); ok { - if cn.Hdr.Name == fqdn { - fqdn = cn.Target - break - } - } - } - } - - authoritativeNss, err := lookupNameservers(fqdn) - if err != nil { - return false, err - } - - return checkAuthoritativeNss(fqdn, value, authoritativeNss) -} - -// checkAuthoritativeNss queries each of the given nameservers for the expected TXT record. 
-func checkAuthoritativeNss(fqdn, value string, nameservers []string) (bool, error) {
-	for _, ns := range nameservers {
-		r, err := dnsQuery(fqdn, dns.TypeTXT, []string{net.JoinHostPort(ns, "53")}, false)
-		if err != nil {
-			return false, err
-		}
-
-		if r.Rcode != dns.RcodeSuccess {
-			return false, fmt.Errorf("NS %s returned %s for %s", ns, dns.RcodeToString[r.Rcode], fqdn)
-		}
-
-		var found bool
-		for _, rr := range r.Answer {
-			if txt, ok := rr.(*dns.TXT); ok {
-				if strings.Join(txt.Txt, "") == value {
-					found = true
-					break
-				}
-			}
-		}
-
-		if !found {
-			return false, fmt.Errorf("NS %s did not return the expected TXT record", ns)
-		}
-	}
-
-	return true, nil
-}
-
-// dnsQuery will query a nameserver, iterating through the supplied servers as it retries.
-// The nameserver should include a port, to facilitate testing where we talk to a mock dns server.
-func dnsQuery(fqdn string, rtype uint16, nameservers []string, recursive bool) (in *dns.Msg, err error) {
-	m := new(dns.Msg)
-	m.SetQuestion(fqdn, rtype)
-	m.SetEdns0(4096, false)
-
-	if !recursive {
-		m.RecursionDesired = false
-	}
-
-	// Will retry the request based on the number of servers (n+1)
-	for i := 1; i <= len(nameservers)+1; i++ {
-		ns := nameservers[i%len(nameservers)]
-		udp := &dns.Client{Net: "udp", Timeout: DNSTimeout}
-		in, _, err = udp.Exchange(m, ns)
-
-		if err == dns.ErrTruncated {
-			tcp := &dns.Client{Net: "tcp", Timeout: DNSTimeout}
-			// If the TCP request succeeds, the err will reset to nil
-			in, _, err = tcp.Exchange(m, ns)
-		}
-
-		if err == nil {
-			break
-		}
-	}
-	return
-}
-
-// lookupNameservers returns the authoritative nameservers for the given fqdn.
-func lookupNameservers(fqdn string) ([]string, error) {
-	var authoritativeNss []string
-
-	zone, err := FindZoneByFqdn(fqdn, RecursiveNameservers)
-	if err != nil {
-		return nil, err
-	}
-
-	r, err := dnsQuery(zone, dns.TypeNS, RecursiveNameservers, true)
-	if err != nil {
-		return nil, err
-	}
-
-	for _, rr := range r.Answer {
-		if ns, ok := rr.(*dns.NS); ok {
-			authoritativeNss = append(authoritativeNss, strings.ToLower(ns.Ns))
-		}
-	}
-
-	if len(authoritativeNss) > 0 {
-		return authoritativeNss, nil
-	}
-	return nil, fmt.Errorf("Could not determine authoritative nameservers")
-}
-
-// FindZoneByFqdn determines the zone of the given fqdn
-func FindZoneByFqdn(fqdn string, nameservers []string) (string, error) {
-	// Do we have it cached?
-	if zone, ok := fqdnToZone[fqdn]; ok {
-		return zone, nil
-	}
-
-	// Query the authoritative nameserver for a hopefully non-existing SOA record,
-	// in the authority section of the reply it will have the SOA of the
-	// containing zone.
rfc2308 has this to say on the subject: - // Name servers authoritative for a zone MUST include the SOA record of - // the zone in the authority section of the response when reporting an - // NXDOMAIN or indicating that no data (NODATA) of the requested type exists - in, err := dnsQuery(fqdn, dns.TypeSOA, nameservers, true) - if err != nil { - return "", err - } - if in.Rcode != dns.RcodeNameError { - if in.Rcode != dns.RcodeSuccess { - return "", fmt.Errorf("The NS returned %s for %s", dns.RcodeToString[in.Rcode], fqdn) - } - // We have a success, so one of the answers has to be a SOA RR - for _, ans := range in.Answer { - if soa, ok := ans.(*dns.SOA); ok { - return checkIfTLD(fqdn, soa) - } - } - // Or it is NODATA, fall through to NXDOMAIN - } - // Search the authority section for our precious SOA RR - for _, ns := range in.Ns { - if soa, ok := ns.(*dns.SOA); ok { - return checkIfTLD(fqdn, soa) - } - } - return "", fmt.Errorf("The NS did not return the expected SOA record in the authority section") -} - -func checkIfTLD(fqdn string, soa *dns.SOA) (string, error) { - zone := soa.Hdr.Name - // If we ended up on one of the TLDs, it means the domain did not exist. - publicsuffix, _ := publicsuffix.PublicSuffix(UnFqdn(zone)) - if publicsuffix == UnFqdn(zone) { - return "", fmt.Errorf("Could not determine zone authoritatively") - } - fqdnToZone[fqdn] = zone - return zone, nil -} - -// ClearFqdnCache clears the cache of fqdn to zone mappings. Primarily used in testing. -func ClearFqdnCache() { - fqdnToZone = map[string]string{} -} - -// ToFqdn converts the name into a fqdn appending a trailing dot. -func ToFqdn(name string) string { - n := len(name) - if n == 0 || name[n-1] == '.' { - return name - } - return name + "." -} - -// UnFqdn converts the fqdn into a name removing the trailing dot. -func UnFqdn(name string) string { - n := len(name) - if n != 0 && name[n-1] == '.' { - return name[:n-1] - } - return name -} diff --git a/vendor/github.com/xenolf/lego/acme/dns_challenge_manual.go b/vendor/github.com/xenolf/lego/acme/dns_challenge_manual.go deleted file mode 100644 index 240384e6..00000000 --- a/vendor/github.com/xenolf/lego/acme/dns_challenge_manual.go +++ /dev/null @@ -1,53 +0,0 @@ -package acme - -import ( - "bufio" - "fmt" - "os" -) - -const ( - dnsTemplate = "%s %d IN TXT \"%s\"" -) - -// DNSProviderManual is an implementation of the ChallengeProvider interface -type DNSProviderManual struct{} - -// NewDNSProviderManual returns a DNSProviderManual instance. 
-func NewDNSProviderManual() (*DNSProviderManual, error) { - return &DNSProviderManual{}, nil -} - -// Present prints instructions for manually creating the TXT record -func (*DNSProviderManual) Present(domain, token, keyAuth string) error { - fqdn, value, ttl := DNS01Record(domain, keyAuth) - dnsRecord := fmt.Sprintf(dnsTemplate, fqdn, ttl, value) - - authZone, err := FindZoneByFqdn(fqdn, RecursiveNameservers) - if err != nil { - return err - } - - logf("[INFO] acme: Please create the following TXT record in your %s zone:", authZone) - logf("[INFO] acme: %s", dnsRecord) - logf("[INFO] acme: Press 'Enter' when you are done") - - reader := bufio.NewReader(os.Stdin) - _, _ = reader.ReadString('\n') - return nil -} - -// CleanUp prints instructions for manually removing the TXT record -func (*DNSProviderManual) CleanUp(domain, token, keyAuth string) error { - fqdn, _, ttl := DNS01Record(domain, keyAuth) - dnsRecord := fmt.Sprintf(dnsTemplate, fqdn, ttl, "...") - - authZone, err := FindZoneByFqdn(fqdn, RecursiveNameservers) - if err != nil { - return err - } - - logf("[INFO] acme: You can now remove this TXT record from your %s zone:", authZone) - logf("[INFO] acme: %s", dnsRecord) - return nil -} diff --git a/vendor/github.com/xenolf/lego/acme/error.go b/vendor/github.com/xenolf/lego/acme/error.go deleted file mode 100644 index 2aa690b3..00000000 --- a/vendor/github.com/xenolf/lego/acme/error.go +++ /dev/null @@ -1,86 +0,0 @@ -package acme - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" -) - -const ( - tosAgreementError = "Must agree to subscriber agreement before any further actions" -) - -// RemoteError is the base type for all errors specific to the ACME protocol. -type RemoteError struct { - StatusCode int `json:"status,omitempty"` - Type string `json:"type"` - Detail string `json:"detail"` -} - -func (e RemoteError) Error() string { - return fmt.Sprintf("acme: Error %d - %s - %s", e.StatusCode, e.Type, e.Detail) -} - -// TOSError represents the error which is returned if the user needs to -// accept the TOS. -// TODO: include the new TOS url if we can somehow obtain it. 
-type TOSError struct {
-	RemoteError
-}
-
-type domainError struct {
-	Domain string
-	Error  error
-}
-
-type challengeError struct {
-	RemoteError
-	records []validationRecord
-}
-
-func (c challengeError) Error() string {
-
-	var errStr string
-	for _, validation := range c.records {
-		errStr = errStr + fmt.Sprintf("\tValidation for %s:%s\n\tResolved to:\n\t\t%s\n\tUsed: %s\n\n",
-			validation.Hostname, validation.Port, strings.Join(validation.ResolvedAddresses, "\n\t\t"), validation.UsedAddress)
-	}
-
-	return fmt.Sprintf("%s\nError Detail:\n%s", c.RemoteError.Error(), errStr)
-}
-
-func handleHTTPError(resp *http.Response) error {
-	var errorDetail RemoteError
-
-	contentType := resp.Header.Get("Content-Type")
-	// try to decode the content as JSON
-	if contentType == "application/json" || contentType == "application/problem+json" {
-		decoder := json.NewDecoder(resp.Body)
-		err := decoder.Decode(&errorDetail)
-		if err != nil {
-			return err
-		}
-	} else {
-		detailBytes, err := ioutil.ReadAll(limitReader(resp.Body, 1024*1024))
-		if err != nil {
-			return err
-		}
-
-		errorDetail.Detail = string(detailBytes)
-	}
-
-	errorDetail.StatusCode = resp.StatusCode
-
-	// Check for errors we handle specifically
-	if errorDetail.StatusCode == http.StatusForbidden && errorDetail.Detail == tosAgreementError {
-		return TOSError{errorDetail}
-	}
-
-	return errorDetail
-}
-
-func handleChallengeError(chlng challenge) error {
-	return challengeError{chlng.Error, chlng.ValidationRecords}
-}
diff --git a/vendor/github.com/xenolf/lego/acme/http.go b/vendor/github.com/xenolf/lego/acme/http.go
deleted file mode 100644
index 3b5a37cb..00000000
--- a/vendor/github.com/xenolf/lego/acme/http.go
+++ /dev/null
@@ -1,120 +0,0 @@
-package acme
-
-import (
-	"encoding/json"
-	"errors"
-	"fmt"
-	"io"
-	"net/http"
-	"runtime"
-	"strings"
-	"time"
-)
-
-// UserAgent (if non-empty) will be tacked onto the User-Agent string in requests.
-var UserAgent string
-
-// HTTPTimeout is used to override the default HTTP timeout of 10 seconds.
-var HTTPTimeout = 10 * time.Second
-
-// defaultClient is an HTTP client with a reasonable timeout value.
-var defaultClient = http.Client{Timeout: HTTPTimeout}
-
-const (
-	// defaultGoUserAgent is the Go HTTP package user agent string. Too
-	// bad it isn't exported. If it changes, we should update it here, too.
-	defaultGoUserAgent = "Go-http-client/1.1"
-
-	// ourUserAgent is the User-Agent of this underlying library package.
-	ourUserAgent = "xenolf-acme"
-)
-
-// httpHead performs a HEAD request with a proper User-Agent string.
-// The response body (resp.Body) is already closed when this function returns.
-func httpHead(url string) (resp *http.Response, err error) {
-	req, err := http.NewRequest("HEAD", url, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	req.Header.Set("User-Agent", userAgent())
-
-	resp, err = defaultClient.Do(req)
-	if err != nil {
-		return resp, err
-	}
-	resp.Body.Close()
-	return resp, err
-}
-
-// httpPost performs a POST request with a proper User-Agent string.
-// Callers should close resp.Body when done reading from it.
-func httpPost(url string, bodyType string, body io.Reader) (resp *http.Response, err error) {
-	req, err := http.NewRequest("POST", url, body)
-	if err != nil {
-		return nil, err
-	}
-	req.Header.Set("Content-Type", bodyType)
-	req.Header.Set("User-Agent", userAgent())
-
-	return defaultClient.Do(req)
-}
-
-// httpGet performs a GET request with a proper User-Agent string.
-// Callers should close resp.Body when done reading from it.
-func httpGet(url string) (resp *http.Response, err error) { - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - req.Header.Set("User-Agent", userAgent()) - - return defaultClient.Do(req) -} - -// getJSON performs an HTTP GET request and parses the response body -// as JSON, into the provided respBody object. -func getJSON(uri string, respBody interface{}) (http.Header, error) { - resp, err := httpGet(uri) - if err != nil { - return nil, fmt.Errorf("failed to get %q: %v", uri, err) - } - defer resp.Body.Close() - - if resp.StatusCode >= http.StatusBadRequest { - return resp.Header, handleHTTPError(resp) - } - - return resp.Header, json.NewDecoder(resp.Body).Decode(respBody) -} - -// postJSON performs an HTTP POST request and parses the response body -// as JSON, into the provided respBody object. -func postJSON(j *jws, uri string, reqBody, respBody interface{}) (http.Header, error) { - jsonBytes, err := json.Marshal(reqBody) - if err != nil { - return nil, errors.New("Failed to marshal network message...") - } - - resp, err := j.post(uri, jsonBytes) - if err != nil { - return nil, fmt.Errorf("Failed to post JWS message. -> %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode >= http.StatusBadRequest { - return resp.Header, handleHTTPError(resp) - } - - if respBody == nil { - return resp.Header, nil - } - - return resp.Header, json.NewDecoder(resp.Body).Decode(respBody) -} - -// userAgent builds and returns the User-Agent string to use in requests. -func userAgent() string { - ua := fmt.Sprintf("%s (%s; %s) %s %s", defaultGoUserAgent, runtime.GOOS, runtime.GOARCH, ourUserAgent, UserAgent) - return strings.TrimSpace(ua) -} diff --git a/vendor/github.com/xenolf/lego/acme/http_challenge.go b/vendor/github.com/xenolf/lego/acme/http_challenge.go deleted file mode 100644 index 95cb1fd8..00000000 --- a/vendor/github.com/xenolf/lego/acme/http_challenge.go +++ /dev/null @@ -1,41 +0,0 @@ -package acme - -import ( - "fmt" - "log" -) - -type httpChallenge struct { - jws *jws - validate validateFunc - provider ChallengeProvider -} - -// HTTP01ChallengePath returns the URL path for the `http-01` challenge -func HTTP01ChallengePath(token string) string { - return "/.well-known/acme-challenge/" + token -} - -func (s *httpChallenge) Solve(chlng challenge, domain string) error { - - logf("[INFO][%s] acme: Trying to solve HTTP-01", domain) - - // Generate the Key Authorization for the challenge - keyAuth, err := getKeyAuthorization(chlng.Token, s.jws.privKey) - if err != nil { - return err - } - - err = s.provider.Present(domain, chlng.Token, keyAuth) - if err != nil { - return fmt.Errorf("[%s] error presenting token: %v", domain, err) - } - defer func() { - err := s.provider.CleanUp(domain, chlng.Token, keyAuth) - if err != nil { - log.Printf("[%s] error cleaning up: %v", domain, err) - } - }() - - return s.validate(s.jws, domain, chlng.URI, challenge{Resource: "challenge", Type: chlng.Type, Token: chlng.Token, KeyAuthorization: keyAuth}) -} diff --git a/vendor/github.com/xenolf/lego/acme/http_challenge_server.go b/vendor/github.com/xenolf/lego/acme/http_challenge_server.go deleted file mode 100644 index 42541380..00000000 --- a/vendor/github.com/xenolf/lego/acme/http_challenge_server.go +++ /dev/null @@ -1,79 +0,0 @@ -package acme - -import ( - "fmt" - "net" - "net/http" - "strings" -) - -// HTTPProviderServer implements ChallengeProvider for `http-01` challenge -// It may be instantiated without using the NewHTTPProviderServer function if -// you want only to 
use the default values.
-type HTTPProviderServer struct {
-	iface    string
-	port     string
-	done     chan bool
-	listener net.Listener
-}
-
-// NewHTTPProviderServer creates a new HTTPProviderServer on the selected interface and port.
-// Setting iface and / or port to an empty string will make the server fall back to
-// the "any" interface and port 80 respectively.
-func NewHTTPProviderServer(iface, port string) *HTTPProviderServer {
-	return &HTTPProviderServer{iface: iface, port: port}
-}
-
-// Present starts a web server and makes the token available at `HTTP01ChallengePath(token)` for web requests.
-func (s *HTTPProviderServer) Present(domain, token, keyAuth string) error {
-	if s.port == "" {
-		s.port = "80"
-	}
-
-	var err error
-	s.listener, err = net.Listen("tcp", net.JoinHostPort(s.iface, s.port))
-	if err != nil {
-		return fmt.Errorf("Could not start HTTP server for challenge -> %v", err)
-	}
-
-	s.done = make(chan bool)
-	go s.serve(domain, token, keyAuth)
-	return nil
-}
-
-// CleanUp closes the HTTP server and removes the token from `HTTP01ChallengePath(token)`
-func (s *HTTPProviderServer) CleanUp(domain, token, keyAuth string) error {
-	if s.listener == nil {
-		return nil
-	}
-	s.listener.Close()
-	<-s.done
-	return nil
-}
-
-func (s *HTTPProviderServer) serve(domain, token, keyAuth string) {
-	path := HTTP01ChallengePath(token)
-
-	// The handler validates the Host header and request method.
-	// For valid requests it serves the key authorization, which proves control of the token.
-	mux := http.NewServeMux()
-	mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) {
-		if strings.HasPrefix(r.Host, domain) && r.Method == "GET" {
-			w.Header().Add("Content-Type", "text/plain")
-			w.Write([]byte(keyAuth))
-			logf("[INFO][%s] Served key authentication", domain)
-		} else {
-			logf("[INFO] Received request for domain %s with method %s", r.Host, r.Method)
-			w.Write([]byte("TEST"))
-		}
-	})
-
-	httpServer := &http.Server{
-		Handler: mux,
-	}
-	// Once httpServer is shut down we don't want any lingering
-	// connections, so disable KeepAlives.
-	httpServer.SetKeepAlivesEnabled(false)
-	httpServer.Serve(s.listener)
-	s.done <- true
-}
diff --git a/vendor/github.com/xenolf/lego/acme/jws.go b/vendor/github.com/xenolf/lego/acme/jws.go
deleted file mode 100644
index e000bc64..00000000
--- a/vendor/github.com/xenolf/lego/acme/jws.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package acme
-
-import (
-	"bytes"
-	"crypto"
-	"crypto/ecdsa"
-	"crypto/elliptic"
-	"crypto/rsa"
-	"fmt"
-	"net/http"
-
-	"gopkg.in/square/go-jose.v1"
-)
-
-type jws struct {
-	directoryURL string
-	privKey      crypto.PrivateKey
-	nonces       []string
-}
-
-func keyAsJWK(key interface{}) *jose.JsonWebKey {
-	switch k := key.(type) {
-	case *ecdsa.PublicKey:
-		return &jose.JsonWebKey{Key: k, Algorithm: "EC"}
-	case *rsa.PublicKey:
-		return &jose.JsonWebKey{Key: k, Algorithm: "RSA"}
-
-	default:
-		return nil
-	}
-}
-
-// Posts a JWS signed message to the specified URL
-func (j *jws) post(url string, content []byte) (*http.Response, error) {
-	signedContent, err := j.signContent(content)
-	if err != nil {
-		return nil, err
-	}
-
-	resp, err := httpPost(url, "application/jose+json", bytes.NewBuffer([]byte(signedContent.FullSerialize())))
-	if err != nil {
-		return nil, err
-	}
-
-	j.getNonceFromResponse(resp)
-
-	return resp, err
-}
-
-func (j *jws) signContent(content []byte) (*jose.JsonWebSignature, error) {
-
-	var alg jose.SignatureAlgorithm
-	switch k := j.privKey.(type) {
-	case *rsa.PrivateKey:
-		alg = jose.RS256
-	case *ecdsa.PrivateKey:
-		if k.Curve == elliptic.P256() {
-			alg = jose.ES256
-		} else if k.Curve == elliptic.P384() {
-			alg = jose.ES384
-		}
-	}
-
-	signer, err := jose.NewSigner(alg, j.privKey)
-	if err != nil {
-		return nil, err
-	}
-	signer.SetNonceSource(j)
-
-	signed, err := signer.Sign(content)
-	if err != nil {
-		return nil, err
-	}
-	return signed, nil
-}
-
-func (j *jws) getNonceFromResponse(resp *http.Response) error {
-	nonce := resp.Header.Get("Replay-Nonce")
-	if nonce == "" {
-		return fmt.Errorf("Server did not respond with a proper nonce header.")
-	}
-
-	j.nonces = append(j.nonces, nonce)
-	return nil
-}
-
-func (j *jws) getNonce() error {
-	resp, err := httpHead(j.directoryURL)
-	if err != nil {
-		return err
-	}
-
-	return j.getNonceFromResponse(resp)
-}
-
-func (j *jws) Nonce() (string, error) {
-	nonce := ""
-	if len(j.nonces) == 0 {
-		err := j.getNonce()
-		if err != nil {
-			return nonce, err
-		}
-	}
-	if len(j.nonces) == 0 {
-		return "", fmt.Errorf("Can't get nonce")
-	}
-	nonce, j.nonces = j.nonces[len(j.nonces)-1], j.nonces[:len(j.nonces)-1]
-	return nonce, nil
-}
diff --git a/vendor/github.com/xenolf/lego/acme/messages.go b/vendor/github.com/xenolf/lego/acme/messages.go
deleted file mode 100644
index a6539b96..00000000
--- a/vendor/github.com/xenolf/lego/acme/messages.go
+++ /dev/null
@@ -1,116 +0,0 @@
-package acme
-
-import (
-	"time"
-
-	"gopkg.in/square/go-jose.v1"
-)
-
-type directory struct {
-	NewAuthzURL   string `json:"new-authz"`
-	NewCertURL    string `json:"new-cert"`
-	NewRegURL     string `json:"new-reg"`
-	RevokeCertURL string `json:"revoke-cert"`
-}
-
-type recoveryKeyMessage struct {
-	Length int             `json:"length,omitempty"`
-	Client jose.JsonWebKey `json:"client,omitempty"`
-	Server jose.JsonWebKey `json:"server,omitempty"`
-}
-
-type registrationMessage struct {
-	Resource string   `json:"resource"`
-	Contact  []string `json:"contact"`
-	Delete   bool     `json:"delete,omitempty"`
-	// RecoveryKey recoveryKeyMessage `json:"recoveryKey,omitempty"`
-}
-
-// Registration is returned by the ACME server after the registration.
-// The client
implementation should save this registration somewhere.
-type Registration struct {
-	Resource       string          `json:"resource,omitempty"`
-	ID             int             `json:"id"`
-	Key            jose.JsonWebKey `json:"key"`
-	Contact        []string        `json:"contact"`
-	Agreement      string          `json:"agreement,omitempty"`
-	Authorizations string          `json:"authorizations,omitempty"`
-	Certificates   string          `json:"certificates,omitempty"`
-	// RecoveryKey recoveryKeyMessage `json:"recoveryKey,omitempty"`
-}
-
-// RegistrationResource represents all important information about a registration
-// of which the client needs to keep track itself.
-type RegistrationResource struct {
-	Body        Registration `json:"body,omitempty"`
-	URI         string       `json:"uri,omitempty"`
-	NewAuthzURL string       `json:"new_authzr_uri,omitempty"`
-	TosURL      string       `json:"terms_of_service,omitempty"`
-}
-
-type authorizationResource struct {
-	Body       authorization
-	Domain     string
-	NewCertURL string
-	AuthURL    string
-}
-
-type authorization struct {
-	Resource     string      `json:"resource,omitempty"`
-	Identifier   identifier  `json:"identifier"`
-	Status       string      `json:"status,omitempty"`
-	Expires      time.Time   `json:"expires,omitempty"`
-	Challenges   []challenge `json:"challenges,omitempty"`
-	Combinations [][]int     `json:"combinations,omitempty"`
-}
-
-type identifier struct {
-	Type  string `json:"type"`
-	Value string `json:"value"`
-}
-
-type validationRecord struct {
-	URI               string   `json:"url,omitempty"`
-	Hostname          string   `json:"hostname,omitempty"`
-	Port              string   `json:"port,omitempty"`
-	ResolvedAddresses []string `json:"addressesResolved,omitempty"`
-	UsedAddress       string   `json:"addressUsed,omitempty"`
-}
-
-type challenge struct {
-	Resource          string             `json:"resource,omitempty"`
-	Type              Challenge          `json:"type,omitempty"`
-	Status            string             `json:"status,omitempty"`
-	URI               string             `json:"uri,omitempty"`
-	Token             string             `json:"token,omitempty"`
-	KeyAuthorization  string             `json:"keyAuthorization,omitempty"`
-	TLS               bool               `json:"tls,omitempty"`
-	Iterations        int                `json:"n,omitempty"`
-	Error             RemoteError        `json:"error,omitempty"`
-	ValidationRecords []validationRecord `json:"validationRecord,omitempty"`
-}
-
-type csrMessage struct {
-	Resource       string   `json:"resource,omitempty"`
-	Csr            string   `json:"csr"`
-	Authorizations []string `json:"authorizations"`
-}
-
-type revokeCertMessage struct {
-	Resource    string `json:"resource"`
-	Certificate string `json:"certificate"`
-}
-
-// CertificateResource represents a CA issued certificate.
-// PrivateKey and Certificate are both already PEM encoded
-// and can be directly written to disk. Certificate may
-// be a certificate bundle, depending on the options supplied
-// to create it.
-type CertificateResource struct {
-	Domain        string `json:"domain"`
-	CertURL       string `json:"certUrl"`
-	CertStableURL string `json:"certStableUrl"`
-	AccountRef    string `json:"accountRef,omitempty"`
-	PrivateKey    []byte `json:"-"`
-	Certificate   []byte `json:"-"`
-}
diff --git a/vendor/github.com/xenolf/lego/acme/pop_challenge.go b/vendor/github.com/xenolf/lego/acme/pop_challenge.go
deleted file mode 100644
index 8d2a213b..00000000
--- a/vendor/github.com/xenolf/lego/acme/pop_challenge.go
+++ /dev/null
@@ -1 +0,0 @@
-package acme
diff --git a/vendor/github.com/xenolf/lego/acme/provider.go b/vendor/github.com/xenolf/lego/acme/provider.go
deleted file mode 100644
index d177ff07..00000000
--- a/vendor/github.com/xenolf/lego/acme/provider.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package acme
-
-import "time"
-
-// ChallengeProvider enables implementing a custom challenge
-// provider.
Present presents the solution to a challenge available to
-// be solved. CleanUp will be called by the challenge if Present ends
-// in a non-error state.
-type ChallengeProvider interface {
-	Present(domain, token, keyAuth string) error
-	CleanUp(domain, token, keyAuth string) error
-}
-
-// ChallengeProviderTimeout allows for implementing a
-// ChallengeProvider where an unusually long timeout is required when
-// waiting for an ACME challenge to be satisfied, such as when
-// checking for DNS record propagation. If an implementor of a
-// ChallengeProvider provides a Timeout method, then the return values
-// of the Timeout method will be used when appropriate by the acme
-// package. The interval value is the time between checks.
-//
-// The default values used for timeout and interval are 60 seconds and
-// 2 seconds respectively. These are used when no Timeout method is
-// defined for the ChallengeProvider.
-type ChallengeProviderTimeout interface {
-	ChallengeProvider
-	Timeout() (timeout, interval time.Duration)
-}
diff --git a/vendor/github.com/xenolf/lego/acme/tls_sni_challenge.go b/vendor/github.com/xenolf/lego/acme/tls_sni_challenge.go
deleted file mode 100644
index 34383cbf..00000000
--- a/vendor/github.com/xenolf/lego/acme/tls_sni_challenge.go
+++ /dev/null
@@ -1,67 +0,0 @@
-package acme
-
-import (
-	"crypto/rsa"
-	"crypto/sha256"
-	"crypto/tls"
-	"encoding/hex"
-	"fmt"
-	"log"
-)
-
-type tlsSNIChallenge struct {
-	jws      *jws
-	validate validateFunc
-	provider ChallengeProvider
-}
-
-func (t *tlsSNIChallenge) Solve(chlng challenge, domain string) error {
-	// FIXME: https://github.com/ietf-wg-acme/acme/pull/22
-	// Currently we implement this challenge to track boulder, not the current spec!
-
-	logf("[INFO][%s] acme: Trying to solve TLS-SNI-01", domain)
-
-	// Generate the Key Authorization for the challenge
-	keyAuth, err := getKeyAuthorization(chlng.Token, t.jws.privKey)
-	if err != nil {
-		return err
-	}
-
-	err = t.provider.Present(domain, chlng.Token, keyAuth)
-	if err != nil {
-		return fmt.Errorf("[%s] error presenting token: %v", domain, err)
-	}
-	defer func() {
-		err := t.provider.CleanUp(domain, chlng.Token, keyAuth)
-		if err != nil {
-			log.Printf("[%s] error cleaning up: %v", domain, err)
-		}
-	}()
-	return t.validate(t.jws, domain, chlng.URI, challenge{Resource: "challenge", Type: chlng.Type, Token: chlng.Token, KeyAuthorization: keyAuth})
-}
-
-// TLSSNI01ChallengeCert returns a certificate and target domain for the `tls-sni-01` challenge
-func TLSSNI01ChallengeCert(keyAuth string) (tls.Certificate, string, error) {
-	// generate a new RSA key for the certificates
-	tempPrivKey, err := generatePrivateKey(RSA2048)
-	if err != nil {
-		return tls.Certificate{}, "", err
-	}
-	rsaPrivKey := tempPrivKey.(*rsa.PrivateKey)
-	rsaPrivPEM := pemEncode(rsaPrivKey)
-
-	zBytes := sha256.Sum256([]byte(keyAuth))
-	z := hex.EncodeToString(zBytes[:sha256.Size])
-	domain := fmt.Sprintf("%s.%s.acme.invalid", z[:32], z[32:])
-	tempCertPEM, err := generatePemCert(rsaPrivKey, domain)
-	if err != nil {
-		return tls.Certificate{}, "", err
-	}
-
-	certificate, err := tls.X509KeyPair(tempCertPEM, rsaPrivPEM)
-	if err != nil {
-		return tls.Certificate{}, "", err
-	}
-
-	return certificate, domain, nil
-}
diff --git a/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_server.go b/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_server.go
deleted file mode 100644
index df00fbb5..00000000
--- a/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_server.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package acme - -import ( - "crypto/tls" - "fmt" - "net" - "net/http" -) - -// TLSProviderServer implements ChallengeProvider for `TLS-SNI-01` challenge -// It may be instantiated without using the NewTLSProviderServer function if -// you want only to use the default values. -type TLSProviderServer struct { - iface string - port string - done chan bool - listener net.Listener -} - -// NewTLSProviderServer creates a new TLSProviderServer on the selected interface and port. -// Setting iface and / or port to an empty string will make the server fall back to -// the "any" interface and port 443 respectively. -func NewTLSProviderServer(iface, port string) *TLSProviderServer { - return &TLSProviderServer{iface: iface, port: port} -} - -// Present makes the keyAuth available as a cert -func (s *TLSProviderServer) Present(domain, token, keyAuth string) error { - if s.port == "" { - s.port = "443" - } - - cert, _, err := TLSSNI01ChallengeCert(keyAuth) - if err != nil { - return err - } - - tlsConf := new(tls.Config) - tlsConf.Certificates = []tls.Certificate{cert} - - s.listener, err = tls.Listen("tcp", net.JoinHostPort(s.iface, s.port), tlsConf) - if err != nil { - return fmt.Errorf("Could not start HTTPS server for challenge -> %v", err) - } - - s.done = make(chan bool) - go func() { - http.Serve(s.listener, nil) - s.done <- true - }() - return nil -} - -// CleanUp closes the HTTP server. -func (s *TLSProviderServer) CleanUp(domain, token, keyAuth string) error { - if s.listener == nil { - return nil - } - s.listener.Close() - <-s.done - return nil -} diff --git a/vendor/github.com/xenolf/lego/acme/utils.go b/vendor/github.com/xenolf/lego/acme/utils.go deleted file mode 100644 index 2fa0db30..00000000 --- a/vendor/github.com/xenolf/lego/acme/utils.go +++ /dev/null @@ -1,29 +0,0 @@ -package acme - -import ( - "fmt" - "time" -) - -// WaitFor polls the given function 'f', once every 'interval', up to 'timeout'. -func WaitFor(timeout, interval time.Duration, f func() (bool, error)) error { - var lastErr string - timeup := time.After(timeout) - for { - select { - case <-timeup: - return fmt.Errorf("Time limit exceeded. Last error: %s", lastErr) - default: - } - - stop, err := f() - if stop { - return nil - } - if err != nil { - lastErr = err.Error() - } - - time.Sleep(interval) - } -} diff --git a/vendor/golang.org/x/crypto/README b/vendor/golang.org/x/crypto/README deleted file mode 100644 index f1e0cbf9..00000000 --- a/vendor/golang.org/x/crypto/README +++ /dev/null @@ -1,3 +0,0 @@ -This repository holds supplementary Go cryptography libraries. - -To submit changes to this repository, see http://golang.org/doc/contribute.html. diff --git a/vendor/golang.org/x/crypto/README.md b/vendor/golang.org/x/crypto/README.md new file mode 100644 index 00000000..c9d6fecd --- /dev/null +++ b/vendor/golang.org/x/crypto/README.md @@ -0,0 +1,21 @@ +# Go Cryptography + +This repository holds supplementary Go cryptography libraries. + +## Download/Install + +The easiest way to install is to run `go get -u golang.org/x/crypto/...`. You +can also manually git clone the repository to `$GOPATH/src/golang.org/x/crypto`. + +## Report Issues / Send Patches + +This repository uses Gerrit for code changes. To learn how to submit changes to +this repository, see https://golang.org/doc/contribute.html. + +The main issue tracker for the crypto repository is located at +https://github.com/golang/go/issues. Prefix your issue with "x/crypto:" in the +subject line, so it is easy to find. 
+ +Note that contributions to the cryptography package receive additional scrutiny +due to their sensitive nature. Patches may take longer than normal to receive +feedback. diff --git a/vendor/golang.org/x/crypto/acme/acme.go b/vendor/golang.org/x/crypto/acme/acme.go new file mode 100644 index 00000000..7df64764 --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/acme.go @@ -0,0 +1,922 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package acme provides an implementation of the +// Automatic Certificate Management Environment (ACME) spec. +// See https://tools.ietf.org/html/draft-ietf-acme-acme-02 for details. +// +// Most common scenarios will want to use autocert subdirectory instead, +// which provides automatic access to certificates from Let's Encrypt +// and any other ACME-based CA. +// +// This package is a work in progress and makes no API stability promises. +package acme + +import ( + "context" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/sha256" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/base64" + "encoding/hex" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" + "io/ioutil" + "math/big" + "net/http" + "strings" + "sync" + "time" +) + +const ( + // LetsEncryptURL is the Directory endpoint of Let's Encrypt CA. + LetsEncryptURL = "https://acme-v01.api.letsencrypt.org/directory" + + // ALPNProto is the ALPN protocol name used by a CA server when validating + // tls-alpn-01 challenges. + // + // Package users must ensure their servers can negotiate the ACME ALPN in + // order for tls-alpn-01 challenge verifications to succeed. + // See the crypto/tls package's Config.NextProtos field. + ALPNProto = "acme-tls/1" +) + +// idPeACMEIdentifierV1 is the OID for the ACME extension for the TLS-ALPN challenge. +var idPeACMEIdentifierV1 = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 30, 1} + +const ( + maxChainLen = 5 // max depth and breadth of a certificate chain + maxCertSize = 1 << 20 // max size of a certificate, in bytes + + // Max number of collected nonces kept in memory. + // Expect usual peak of 1 or 2. + maxNonces = 100 +) + +// Client is an ACME client. +// The only required field is Key. An example of creating a client with a new key +// is as follows: +// +// key, err := rsa.GenerateKey(rand.Reader, 2048) +// if err != nil { +// log.Fatal(err) +// } +// client := &Client{Key: key} +// +type Client struct { + // Key is the account key used to register with a CA and sign requests. + // Key.Public() must return a *rsa.PublicKey or *ecdsa.PublicKey. + Key crypto.Signer + + // HTTPClient optionally specifies an HTTP client to use + // instead of http.DefaultClient. + HTTPClient *http.Client + + // DirectoryURL points to the CA directory endpoint. + // If empty, LetsEncryptURL is used. + // Mutating this value after a successful call of Client's Discover method + // will have no effect. + DirectoryURL string + + // RetryBackoff computes the duration after which the nth retry of a failed request + // should occur. The value of n for the first call on failure is 1. + // The values of r and resp are the request and response of the last failed attempt. + // If the returned value is negative or zero, no more retries are done and an error + // is returned to the caller of the original method. 
+ // + // Requests which result in a 4xx client error are not retried, + // except for 400 Bad Request due to "bad nonce" errors and 429 Too Many Requests. + // + // If RetryBackoff is nil, a truncated exponential backoff algorithm + // with the ceiling of 10 seconds is used, where each subsequent retry n + // is done after either ("Retry-After" + jitter) or (2^n seconds + jitter), + // preferring the former if "Retry-After" header is found in the resp. + // The jitter is a random value up to 1 second. + RetryBackoff func(n int, r *http.Request, resp *http.Response) time.Duration + + dirMu sync.Mutex // guards writes to dir + dir *Directory // cached result of Client's Discover method + + noncesMu sync.Mutex + nonces map[string]struct{} // nonces collected from previous responses +} + +// Discover performs ACME server discovery using c.DirectoryURL. +// +// It caches successful result. So, subsequent calls will not result in +// a network round-trip. This also means mutating c.DirectoryURL after successful call +// of this method will have no effect. +func (c *Client) Discover(ctx context.Context) (Directory, error) { + c.dirMu.Lock() + defer c.dirMu.Unlock() + if c.dir != nil { + return *c.dir, nil + } + + dirURL := c.DirectoryURL + if dirURL == "" { + dirURL = LetsEncryptURL + } + res, err := c.get(ctx, dirURL, wantStatus(http.StatusOK)) + if err != nil { + return Directory{}, err + } + defer res.Body.Close() + c.addNonce(res.Header) + + var v struct { + Reg string `json:"new-reg"` + Authz string `json:"new-authz"` + Cert string `json:"new-cert"` + Revoke string `json:"revoke-cert"` + Meta struct { + Terms string `json:"terms-of-service"` + Website string `json:"website"` + CAA []string `json:"caa-identities"` + } + } + if err := json.NewDecoder(res.Body).Decode(&v); err != nil { + return Directory{}, err + } + c.dir = &Directory{ + RegURL: v.Reg, + AuthzURL: v.Authz, + CertURL: v.Cert, + RevokeURL: v.Revoke, + Terms: v.Meta.Terms, + Website: v.Meta.Website, + CAA: v.Meta.CAA, + } + return *c.dir, nil +} + +// CreateCert requests a new certificate using the Certificate Signing Request csr encoded in DER format. +// The exp argument indicates the desired certificate validity duration. CA may issue a certificate +// with a different duration. +// If the bundle argument is true, the returned value will also contain the CA (issuer) certificate chain. +// +// In the case where CA server does not provide the issued certificate in the response, +// CreateCert will poll certURL using c.FetchCert, which will result in additional round-trips. +// In such a scenario, the caller can cancel the polling with ctx. +// +// CreateCert returns an error if the CA's response or chain was unreasonably large. +// Callers are encouraged to parse the returned value to ensure the certificate is valid and has the expected features. 
+func (c *Client) CreateCert(ctx context.Context, csr []byte, exp time.Duration, bundle bool) (der [][]byte, certURL string, err error) { + if _, err := c.Discover(ctx); err != nil { + return nil, "", err + } + + req := struct { + Resource string `json:"resource"` + CSR string `json:"csr"` + NotBefore string `json:"notBefore,omitempty"` + NotAfter string `json:"notAfter,omitempty"` + }{ + Resource: "new-cert", + CSR: base64.RawURLEncoding.EncodeToString(csr), + } + now := timeNow() + req.NotBefore = now.Format(time.RFC3339) + if exp > 0 { + req.NotAfter = now.Add(exp).Format(time.RFC3339) + } + + res, err := c.post(ctx, c.Key, c.dir.CertURL, req, wantStatus(http.StatusCreated)) + if err != nil { + return nil, "", err + } + defer res.Body.Close() + + curl := res.Header.Get("Location") // cert permanent URL + if res.ContentLength == 0 { + // no cert in the body; poll until we get it + cert, err := c.FetchCert(ctx, curl, bundle) + return cert, curl, err + } + // slurp issued cert and CA chain, if requested + cert, err := c.responseCert(ctx, res, bundle) + return cert, curl, err +} + +// FetchCert retrieves already issued certificate from the given url, in DER format. +// It retries the request until the certificate is successfully retrieved, +// context is cancelled by the caller or an error response is received. +// +// The returned value will also contain the CA (issuer) certificate if the bundle argument is true. +// +// FetchCert returns an error if the CA's response or chain was unreasonably large. +// Callers are encouraged to parse the returned value to ensure the certificate is valid +// and has expected features. +func (c *Client) FetchCert(ctx context.Context, url string, bundle bool) ([][]byte, error) { + res, err := c.get(ctx, url, wantStatus(http.StatusOK)) + if err != nil { + return nil, err + } + return c.responseCert(ctx, res, bundle) +} + +// RevokeCert revokes a previously issued certificate cert, provided in DER format. +// +// The key argument, used to sign the request, must be authorized +// to revoke the certificate. It's up to the CA to decide which keys are authorized. +// For instance, the key pair of the certificate may be authorized. +// If the key is nil, c.Key is used instead. +func (c *Client) RevokeCert(ctx context.Context, key crypto.Signer, cert []byte, reason CRLReasonCode) error { + if _, err := c.Discover(ctx); err != nil { + return err + } + + body := &struct { + Resource string `json:"resource"` + Cert string `json:"certificate"` + Reason int `json:"reason"` + }{ + Resource: "revoke-cert", + Cert: base64.RawURLEncoding.EncodeToString(cert), + Reason: int(reason), + } + if key == nil { + key = c.Key + } + res, err := c.post(ctx, key, c.dir.RevokeURL, body, wantStatus(http.StatusOK)) + if err != nil { + return err + } + defer res.Body.Close() + return nil +} + +// AcceptTOS always returns true to indicate the acceptance of a CA's Terms of Service +// during account registration. See Register method of Client for more details. +func AcceptTOS(tosURL string) bool { return true } + +// Register creates a new account registration by following the "new-reg" flow. +// It returns the registered account. The account is not modified. +// +// The registration may require the caller to agree to the CA's Terms of Service (TOS). +// If so, and the account has not indicated the acceptance of the terms (see Account for details), +// Register calls prompt with a TOS URL provided by the CA. Prompt should report +// whether the caller agrees to the terms. 
To always accept the terms, the caller can use AcceptTOS. +func (c *Client) Register(ctx context.Context, a *Account, prompt func(tosURL string) bool) (*Account, error) { + if _, err := c.Discover(ctx); err != nil { + return nil, err + } + + var err error + if a, err = c.doReg(ctx, c.dir.RegURL, "new-reg", a); err != nil { + return nil, err + } + var accept bool + if a.CurrentTerms != "" && a.CurrentTerms != a.AgreedTerms { + accept = prompt(a.CurrentTerms) + } + if accept { + a.AgreedTerms = a.CurrentTerms + a, err = c.UpdateReg(ctx, a) + } + return a, err +} + +// GetReg retrieves an existing registration. +// The url argument is an Account URI. +func (c *Client) GetReg(ctx context.Context, url string) (*Account, error) { + a, err := c.doReg(ctx, url, "reg", nil) + if err != nil { + return nil, err + } + a.URI = url + return a, nil +} + +// UpdateReg updates an existing registration. +// It returns an updated account copy. The provided account is not modified. +func (c *Client) UpdateReg(ctx context.Context, a *Account) (*Account, error) { + uri := a.URI + a, err := c.doReg(ctx, uri, "reg", a) + if err != nil { + return nil, err + } + a.URI = uri + return a, nil +} + +// Authorize performs the initial step in an authorization flow. +// The caller will then need to choose from and perform a set of returned +// challenges using c.Accept in order to successfully complete authorization. +// +// If an authorization has been previously granted, the CA may return +// a valid authorization (Authorization.Status is StatusValid). If so, the caller +// need not fulfill any challenge and can proceed to requesting a certificate. +func (c *Client) Authorize(ctx context.Context, domain string) (*Authorization, error) { + if _, err := c.Discover(ctx); err != nil { + return nil, err + } + + type authzID struct { + Type string `json:"type"` + Value string `json:"value"` + } + req := struct { + Resource string `json:"resource"` + Identifier authzID `json:"identifier"` + }{ + Resource: "new-authz", + Identifier: authzID{Type: "dns", Value: domain}, + } + res, err := c.post(ctx, c.Key, c.dir.AuthzURL, req, wantStatus(http.StatusCreated)) + if err != nil { + return nil, err + } + defer res.Body.Close() + + var v wireAuthz + if err := json.NewDecoder(res.Body).Decode(&v); err != nil { + return nil, fmt.Errorf("acme: invalid response: %v", err) + } + if v.Status != StatusPending && v.Status != StatusValid { + return nil, fmt.Errorf("acme: unexpected status: %s", v.Status) + } + return v.authorization(res.Header.Get("Location")), nil +} + +// GetAuthorization retrieves an authorization identified by the given URL. +// +// If a caller needs to poll an authorization until its status is final, +// see the WaitAuthorization method. +func (c *Client) GetAuthorization(ctx context.Context, url string) (*Authorization, error) { + res, err := c.get(ctx, url, wantStatus(http.StatusOK, http.StatusAccepted)) + if err != nil { + return nil, err + } + defer res.Body.Close() + var v wireAuthz + if err := json.NewDecoder(res.Body).Decode(&v); err != nil { + return nil, fmt.Errorf("acme: invalid response: %v", err) + } + return v.authorization(url), nil +} + +// RevokeAuthorization relinquishes an existing authorization identified +// by the given URL. +// The url argument is an Authorization.URI value. +// +// If successful, the caller will be required to obtain a new authorization +// using the Authorize method before being able to request a new certificate +// for the domain associated with the authorization. 
+// +// It does not revoke existing certificates. +func (c *Client) RevokeAuthorization(ctx context.Context, url string) error { + req := struct { + Resource string `json:"resource"` + Status string `json:"status"` + Delete bool `json:"delete"` + }{ + Resource: "authz", + Status: "deactivated", + Delete: true, + } + res, err := c.post(ctx, c.Key, url, req, wantStatus(http.StatusOK)) + if err != nil { + return err + } + defer res.Body.Close() + return nil +} + +// WaitAuthorization polls an authorization at the given URL +// until it is in one of the final states, StatusValid or StatusInvalid, +// the ACME CA responded with a 4xx error code, or the context is done. +// +// It returns a non-nil Authorization only if its Status is StatusValid. +// In all other cases WaitAuthorization returns an error. +// If the Status is StatusInvalid, the returned error is of type *AuthorizationError. +func (c *Client) WaitAuthorization(ctx context.Context, url string) (*Authorization, error) { + for { + res, err := c.get(ctx, url, wantStatus(http.StatusOK, http.StatusAccepted)) + if err != nil { + return nil, err + } + + var raw wireAuthz + err = json.NewDecoder(res.Body).Decode(&raw) + res.Body.Close() + switch { + case err != nil: + // Skip and retry. + case raw.Status == StatusValid: + return raw.authorization(url), nil + case raw.Status == StatusInvalid: + return nil, raw.error(url) + } + + // Exponential backoff is implemented in c.get above. + // This is just to prevent continuously hitting the CA + // while waiting for a final authorization status. + d := retryAfter(res.Header.Get("Retry-After")) + if d == 0 { + // Given that the fastest challenges TLS-SNI and HTTP-01 + // require a CA to make at least 1 network round trip + // and most likely persist a challenge state, + // this default delay seems reasonable. + d = time.Second + } + t := time.NewTimer(d) + select { + case <-ctx.Done(): + t.Stop() + return nil, ctx.Err() + case <-t.C: + // Retry. + } + } +} + +// GetChallenge retrieves the current status of an challenge. +// +// A client typically polls a challenge status using this method. +func (c *Client) GetChallenge(ctx context.Context, url string) (*Challenge, error) { + res, err := c.get(ctx, url, wantStatus(http.StatusOK, http.StatusAccepted)) + if err != nil { + return nil, err + } + defer res.Body.Close() + v := wireChallenge{URI: url} + if err := json.NewDecoder(res.Body).Decode(&v); err != nil { + return nil, fmt.Errorf("acme: invalid response: %v", err) + } + return v.challenge(), nil +} + +// Accept informs the server that the client accepts one of its challenges +// previously obtained with c.Authorize. +// +// The server will then perform the validation asynchronously. 
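+//
+// A typical flow, sketched for illustration (authz is assumed to come from a
+// prior Authorize call and chal from picking one of authz.Challenges):
+//
+//	if _, err := c.Accept(ctx, chal); err != nil {
+//		// handle error
+//	}
+//	if _, err := c.WaitAuthorization(ctx, authz.URI); err != nil {
+//		// the CA could not validate the domain
+//	}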
+func (c *Client) Accept(ctx context.Context, chal *Challenge) (*Challenge, error) { + auth, err := keyAuth(c.Key.Public(), chal.Token) + if err != nil { + return nil, err + } + + req := struct { + Resource string `json:"resource"` + Type string `json:"type"` + Auth string `json:"keyAuthorization"` + }{ + Resource: "challenge", + Type: chal.Type, + Auth: auth, + } + res, err := c.post(ctx, c.Key, chal.URI, req, wantStatus( + http.StatusOK, // according to the spec + http.StatusAccepted, // Let's Encrypt: see https://goo.gl/WsJ7VT (acme-divergences.md) + )) + if err != nil { + return nil, err + } + defer res.Body.Close() + + var v wireChallenge + if err := json.NewDecoder(res.Body).Decode(&v); err != nil { + return nil, fmt.Errorf("acme: invalid response: %v", err) + } + return v.challenge(), nil +} + +// DNS01ChallengeRecord returns a DNS record value for a dns-01 challenge response. +// A TXT record containing the returned value must be provisioned under +// "_acme-challenge" name of the domain being validated. +// +// The token argument is a Challenge.Token value. +func (c *Client) DNS01ChallengeRecord(token string) (string, error) { + ka, err := keyAuth(c.Key.Public(), token) + if err != nil { + return "", err + } + b := sha256.Sum256([]byte(ka)) + return base64.RawURLEncoding.EncodeToString(b[:]), nil +} + +// HTTP01ChallengeResponse returns the response for an http-01 challenge. +// Servers should respond with the value to HTTP requests at the URL path +// provided by HTTP01ChallengePath to validate the challenge and prove control +// over a domain name. +// +// The token argument is a Challenge.Token value. +func (c *Client) HTTP01ChallengeResponse(token string) (string, error) { + return keyAuth(c.Key.Public(), token) +} + +// HTTP01ChallengePath returns the URL path at which the response for an http-01 challenge +// should be provided by the servers. +// The response value can be obtained with HTTP01ChallengeResponse. +// +// The token argument is a Challenge.Token value. +func (c *Client) HTTP01ChallengePath(token string) string { + return "/.well-known/acme-challenge/" + token +} + +// TLSSNI01ChallengeCert creates a certificate for TLS-SNI-01 challenge response. +// Servers can present the certificate to validate the challenge and prove control +// over a domain name. +// +// The implementation is incomplete in that the returned value is a single certificate, +// computed only for Z0 of the key authorization. ACME CAs are expected to update +// their implementations to use the newer version, TLS-SNI-02. +// For more details on TLS-SNI-01 see https://tools.ietf.org/html/draft-ietf-acme-acme-01#section-7.3. +// +// The token argument is a Challenge.Token value. +// If a WithKey option is provided, its private part signs the returned cert, +// and the public part is used to specify the signee. +// If no WithKey option is provided, a new ECDSA key is generated using P-256 curve. +// +// The returned certificate is valid for the next 24 hours and must be presented only when +// the server name of the TLS ClientHello matches exactly the returned name value. 
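+//
+// A sketch of the serving side, for illustration only (how the certificate
+// is wired into a listener is up to the caller):
+//
+//	cert, name, err := c.TLSSNI01ChallengeCert(chal.Token)
+//	if err != nil {
+//		// handle error
+//	}
+//	cfg := &tls.Config{
+//		GetCertificate: func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
+//			if hello.ServerName == name {
+//				return &cert, nil
+//			}
+//			return nil, errors.New("unexpected server name")
+//		},
+//	}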
+func (c *Client) TLSSNI01ChallengeCert(token string, opt ...CertOption) (cert tls.Certificate, name string, err error) { + ka, err := keyAuth(c.Key.Public(), token) + if err != nil { + return tls.Certificate{}, "", err + } + b := sha256.Sum256([]byte(ka)) + h := hex.EncodeToString(b[:]) + name = fmt.Sprintf("%s.%s.acme.invalid", h[:32], h[32:]) + cert, err = tlsChallengeCert([]string{name}, opt) + if err != nil { + return tls.Certificate{}, "", err + } + return cert, name, nil +} + +// TLSSNI02ChallengeCert creates a certificate for TLS-SNI-02 challenge response. +// Servers can present the certificate to validate the challenge and prove control +// over a domain name. For more details on TLS-SNI-02 see +// https://tools.ietf.org/html/draft-ietf-acme-acme-03#section-7.3. +// +// The token argument is a Challenge.Token value. +// If a WithKey option is provided, its private part signs the returned cert, +// and the public part is used to specify the signee. +// If no WithKey option is provided, a new ECDSA key is generated using P-256 curve. +// +// The returned certificate is valid for the next 24 hours and must be presented only when +// the server name in the TLS ClientHello matches exactly the returned name value. +func (c *Client) TLSSNI02ChallengeCert(token string, opt ...CertOption) (cert tls.Certificate, name string, err error) { + b := sha256.Sum256([]byte(token)) + h := hex.EncodeToString(b[:]) + sanA := fmt.Sprintf("%s.%s.token.acme.invalid", h[:32], h[32:]) + + ka, err := keyAuth(c.Key.Public(), token) + if err != nil { + return tls.Certificate{}, "", err + } + b = sha256.Sum256([]byte(ka)) + h = hex.EncodeToString(b[:]) + sanB := fmt.Sprintf("%s.%s.ka.acme.invalid", h[:32], h[32:]) + + cert, err = tlsChallengeCert([]string{sanA, sanB}, opt) + if err != nil { + return tls.Certificate{}, "", err + } + return cert, sanA, nil +} + +// TLSALPN01ChallengeCert creates a certificate for TLS-ALPN-01 challenge response. +// Servers can present the certificate to validate the challenge and prove control +// over a domain name. For more details on TLS-ALPN-01 see +// https://tools.ietf.org/html/draft-shoemaker-acme-tls-alpn-00#section-3 +// +// The token argument is a Challenge.Token value. +// If a WithKey option is provided, its private part signs the returned cert, +// and the public part is used to specify the signee. +// If no WithKey option is provided, a new ECDSA key is generated using P-256 curve. +// +// The returned certificate is valid for the next 24 hours and must be presented only when +// the server name in the TLS ClientHello matches the domain, and the special acme-tls/1 ALPN protocol +// has been specified. 
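+//
+// A sketch, for illustration (chal is assumed to be a tls-alpn-01 challenge
+// obtained via Authorize, and "example.com" is a placeholder):
+//
+//	cert, err := c.TLSALPN01ChallengeCert(chal.Token, "example.com")
+//
+// The returned cert should then be presented to connections whose ClientHello
+// has ServerName "example.com" and advertises the acme-tls/1 protocol.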
+func (c *Client) TLSALPN01ChallengeCert(token, domain string, opt ...CertOption) (cert tls.Certificate, err error) { + ka, err := keyAuth(c.Key.Public(), token) + if err != nil { + return tls.Certificate{}, err + } + shasum := sha256.Sum256([]byte(ka)) + extValue, err := asn1.Marshal(shasum[:]) + if err != nil { + return tls.Certificate{}, err + } + acmeExtension := pkix.Extension{ + Id: idPeACMEIdentifierV1, + Critical: true, + Value: extValue, + } + + tmpl := defaultTLSChallengeCertTemplate() + + var newOpt []CertOption + for _, o := range opt { + switch o := o.(type) { + case *certOptTemplate: + t := *(*x509.Certificate)(o) // shallow copy is ok + tmpl = &t + default: + newOpt = append(newOpt, o) + } + } + tmpl.ExtraExtensions = append(tmpl.ExtraExtensions, acmeExtension) + newOpt = append(newOpt, WithTemplate(tmpl)) + return tlsChallengeCert([]string{domain}, newOpt) +} + +// doReg sends all types of registration requests. +// The type of request is identified by typ argument, which is a "resource" +// in the ACME spec terms. +// +// A non-nil acct argument indicates whether the intention is to mutate data +// of the Account. Only Contact and Agreement of its fields are used +// in such cases. +func (c *Client) doReg(ctx context.Context, url string, typ string, acct *Account) (*Account, error) { + req := struct { + Resource string `json:"resource"` + Contact []string `json:"contact,omitempty"` + Agreement string `json:"agreement,omitempty"` + }{ + Resource: typ, + } + if acct != nil { + req.Contact = acct.Contact + req.Agreement = acct.AgreedTerms + } + res, err := c.post(ctx, c.Key, url, req, wantStatus( + http.StatusOK, // updates and deletes + http.StatusCreated, // new account creation + http.StatusAccepted, // Let's Encrypt divergent implementation + )) + if err != nil { + return nil, err + } + defer res.Body.Close() + + var v struct { + Contact []string + Agreement string + Authorizations string + Certificates string + } + if err := json.NewDecoder(res.Body).Decode(&v); err != nil { + return nil, fmt.Errorf("acme: invalid response: %v", err) + } + var tos string + if v := linkHeader(res.Header, "terms-of-service"); len(v) > 0 { + tos = v[0] + } + var authz string + if v := linkHeader(res.Header, "next"); len(v) > 0 { + authz = v[0] + } + return &Account{ + URI: res.Header.Get("Location"), + Contact: v.Contact, + AgreedTerms: v.Agreement, + CurrentTerms: tos, + Authz: authz, + Authorizations: v.Authorizations, + Certificates: v.Certificates, + }, nil +} + +// popNonce returns a nonce value previously stored with c.addNonce +// or fetches a fresh one from the given URL. +func (c *Client) popNonce(ctx context.Context, url string) (string, error) { + c.noncesMu.Lock() + defer c.noncesMu.Unlock() + if len(c.nonces) == 0 { + return c.fetchNonce(ctx, url) + } + var nonce string + for nonce = range c.nonces { + delete(c.nonces, nonce) + break + } + return nonce, nil +} + +// clearNonces clears any stored nonces +func (c *Client) clearNonces() { + c.noncesMu.Lock() + defer c.noncesMu.Unlock() + c.nonces = make(map[string]struct{}) +} + +// addNonce stores a nonce value found in h (if any) for future use. 
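+// Nonces are read from the "Replay-Nonce" response header, and at most
+// maxNonces values are retained.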
+func (c *Client) addNonce(h http.Header) { + v := nonceFromHeader(h) + if v == "" { + return + } + c.noncesMu.Lock() + defer c.noncesMu.Unlock() + if len(c.nonces) >= maxNonces { + return + } + if c.nonces == nil { + c.nonces = make(map[string]struct{}) + } + c.nonces[v] = struct{}{} +} + +func (c *Client) fetchNonce(ctx context.Context, url string) (string, error) { + r, err := http.NewRequest("HEAD", url, nil) + if err != nil { + return "", err + } + resp, err := c.doNoRetry(ctx, r) + if err != nil { + return "", err + } + defer resp.Body.Close() + nonce := nonceFromHeader(resp.Header) + if nonce == "" { + if resp.StatusCode > 299 { + return "", responseError(resp) + } + return "", errors.New("acme: nonce not found") + } + return nonce, nil +} + +func nonceFromHeader(h http.Header) string { + return h.Get("Replay-Nonce") +} + +func (c *Client) responseCert(ctx context.Context, res *http.Response, bundle bool) ([][]byte, error) { + b, err := ioutil.ReadAll(io.LimitReader(res.Body, maxCertSize+1)) + if err != nil { + return nil, fmt.Errorf("acme: response stream: %v", err) + } + if len(b) > maxCertSize { + return nil, errors.New("acme: certificate is too big") + } + cert := [][]byte{b} + if !bundle { + return cert, nil + } + + // Append CA chain cert(s). + // At least one is required according to the spec: + // https://tools.ietf.org/html/draft-ietf-acme-acme-03#section-6.3.1 + up := linkHeader(res.Header, "up") + if len(up) == 0 { + return nil, errors.New("acme: rel=up link not found") + } + if len(up) > maxChainLen { + return nil, errors.New("acme: rel=up link is too large") + } + for _, url := range up { + cc, err := c.chainCert(ctx, url, 0) + if err != nil { + return nil, err + } + cert = append(cert, cc...) + } + return cert, nil +} + +// chainCert fetches CA certificate chain recursively by following "up" links. +// Each recursive call increments the depth by 1, resulting in an error +// if the recursion level reaches maxChainLen. +// +// First chainCert call starts with depth of 0. +func (c *Client) chainCert(ctx context.Context, url string, depth int) ([][]byte, error) { + if depth >= maxChainLen { + return nil, errors.New("acme: certificate chain is too deep") + } + + res, err := c.get(ctx, url, wantStatus(http.StatusOK)) + if err != nil { + return nil, err + } + defer res.Body.Close() + b, err := ioutil.ReadAll(io.LimitReader(res.Body, maxCertSize+1)) + if err != nil { + return nil, err + } + if len(b) > maxCertSize { + return nil, errors.New("acme: certificate is too big") + } + chain := [][]byte{b} + + uplink := linkHeader(res.Header, "up") + if len(uplink) > maxChainLen { + return nil, errors.New("acme: certificate chain is too large") + } + for _, up := range uplink { + cc, err := c.chainCert(ctx, up, depth+1) + if err != nil { + return nil, err + } + chain = append(chain, cc...) + } + + return chain, nil +} + +// linkHeader returns URI-Reference values of all Link headers +// with relation-type rel. +// See https://tools.ietf.org/html/rfc5988#section-5 for details. +func linkHeader(h http.Header, rel string) []string { + var links []string + for _, v := range h["Link"] { + parts := strings.Split(v, ";") + for _, p := range parts { + p = strings.TrimSpace(p) + if !strings.HasPrefix(p, "rel=") { + continue + } + if v := strings.Trim(p[4:], `"`); v == rel { + links = append(links, strings.Trim(parts[0], "<>")) + } + } + } + return links +} + +// keyAuth generates a key authorization string for a given token. 
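+// The result has the form "<token>.<account key JWK thumbprint>".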
+func keyAuth(pub crypto.PublicKey, token string) (string, error) { + th, err := JWKThumbprint(pub) + if err != nil { + return "", err + } + return fmt.Sprintf("%s.%s", token, th), nil +} + +// defaultTLSChallengeCertTemplate is a template used to create challenge certs for TLS challenges. +func defaultTLSChallengeCertTemplate() *x509.Certificate { + return &x509.Certificate{ + SerialNumber: big.NewInt(1), + NotBefore: time.Now(), + NotAfter: time.Now().Add(24 * time.Hour), + BasicConstraintsValid: true, + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + } +} + +// tlsChallengeCert creates a temporary certificate for TLS-SNI challenges +// with the given SANs and auto-generated public/private key pair. +// The Subject Common Name is set to the first SAN to aid debugging. +// To create a cert with a custom key pair, specify WithKey option. +func tlsChallengeCert(san []string, opt []CertOption) (tls.Certificate, error) { + var key crypto.Signer + tmpl := defaultTLSChallengeCertTemplate() + for _, o := range opt { + switch o := o.(type) { + case *certOptKey: + if key != nil { + return tls.Certificate{}, errors.New("acme: duplicate key option") + } + key = o.key + case *certOptTemplate: + t := *(*x509.Certificate)(o) // shallow copy is ok + tmpl = &t + default: + // package's fault, if we let this happen: + panic(fmt.Sprintf("unsupported option type %T", o)) + } + } + if key == nil { + var err error + if key, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader); err != nil { + return tls.Certificate{}, err + } + } + tmpl.DNSNames = san + if len(san) > 0 { + tmpl.Subject.CommonName = san[0] + } + + der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, key.Public(), key) + if err != nil { + return tls.Certificate{}, err + } + return tls.Certificate{ + Certificate: [][]byte{der}, + PrivateKey: key, + }, nil +} + +// encodePEM returns b encoded as PEM with block of type typ. +func encodePEM(typ string, b []byte) []byte { + pb := &pem.Block{Type: typ, Bytes: b} + return pem.EncodeToMemory(pb) +} + +// timeNow is useful for testing for fixed current time. +var timeNow = time.Now diff --git a/vendor/golang.org/x/crypto/acme/autocert/autocert.go b/vendor/golang.org/x/crypto/acme/autocert/autocert.go new file mode 100644 index 00000000..a50d9bfc --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/autocert/autocert.go @@ -0,0 +1,1139 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package autocert provides automatic access to certificates from Let's Encrypt +// and any other ACME-based CA. +// +// This package is a work in progress and makes no API stability promises. +package autocert + +import ( + "bytes" + "context" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "errors" + "fmt" + "io" + mathrand "math/rand" + "net" + "net/http" + "path" + "strings" + "sync" + "time" + + "golang.org/x/crypto/acme" +) + +// createCertRetryAfter is how much time to wait before removing a failed state +// entry due to an unsuccessful createCert call. +// This is a variable instead of a const for testing. +// TODO: Consider making it configurable or an exp backoff? +var createCertRetryAfter = time.Minute + +// pseudoRand is safe for concurrent use. 
+var pseudoRand *lockedMathRand + +func init() { + src := mathrand.NewSource(time.Now().UnixNano()) + pseudoRand = &lockedMathRand{rnd: mathrand.New(src)} +} + +// AcceptTOS is a Manager.Prompt function that always returns true to +// indicate acceptance of the CA's Terms of Service during account +// registration. +func AcceptTOS(tosURL string) bool { return true } + +// HostPolicy specifies which host names the Manager is allowed to respond to. +// It returns a non-nil error if the host should be rejected. +// The returned error is accessible via tls.Conn.Handshake and its callers. +// See Manager's HostPolicy field and GetCertificate method docs for more details. +type HostPolicy func(ctx context.Context, host string) error + +// HostWhitelist returns a policy where only the specified host names are allowed. +// Only exact matches are currently supported. Subdomains, regexp or wildcard +// will not match. +func HostWhitelist(hosts ...string) HostPolicy { + whitelist := make(map[string]bool, len(hosts)) + for _, h := range hosts { + whitelist[h] = true + } + return func(_ context.Context, host string) error { + if !whitelist[host] { + return fmt.Errorf("acme/autocert: host %q not configured in HostWhitelist", host) + } + return nil + } +} + +// defaultHostPolicy is used when Manager.HostPolicy is not set. +func defaultHostPolicy(context.Context, string) error { + return nil +} + +// Manager is a stateful certificate manager built on top of acme.Client. +// It obtains and refreshes certificates automatically using "tls-alpn-01", +// "tls-sni-01", "tls-sni-02" and "http-01" challenge types, +// as well as providing them to a TLS server via tls.Config. +// +// You must specify a cache implementation, such as DirCache, +// to reuse obtained certificates across program restarts. +// Otherwise your server is very likely to exceed the certificate +// issuer's request rate limits. +type Manager struct { + // Prompt specifies a callback function to conditionally accept a CA's Terms of Service (TOS). + // The registration may require the caller to agree to the CA's TOS. + // If so, Manager calls Prompt with a TOS URL provided by the CA. Prompt should report + // whether the caller agrees to the terms. + // + // To always accept the terms, the callers can use AcceptTOS. + Prompt func(tosURL string) bool + + // Cache optionally stores and retrieves previously-obtained certificates + // and other state. If nil, certs will only be cached for the lifetime of + // the Manager. Multiple Managers can share the same Cache. + // + // Using a persistent Cache, such as DirCache, is strongly recommended. + Cache Cache + + // HostPolicy controls which domains the Manager will attempt + // to retrieve new certificates for. It does not affect cached certs. + // + // If non-nil, HostPolicy is called before requesting a new cert. + // If nil, all hosts are currently allowed. This is not recommended, + // as it opens a potential attack where clients connect to a server + // by IP address and pretend to be asking for an incorrect host name. + // Manager will attempt to obtain a certificate for that host, incorrectly, + // eventually reaching the CA's rate limit for certificate requests + // and making it impossible to obtain actual certificates. + // + // See GetCertificate for more details. + HostPolicy HostPolicy + + // RenewBefore optionally specifies how early certificates should + // be renewed before they expire. + // + // If zero, they're renewed 30 days before expiration. 
+ RenewBefore time.Duration + + // Client is used to perform low-level operations, such as account registration + // and requesting new certificates. + // + // If Client is nil, a zero-value acme.Client is used with acme.LetsEncryptURL + // as directory endpoint. If the Client.Key is nil, a new ECDSA P-256 key is + // generated and, if Cache is not nil, stored in cache. + // + // Mutating the field after the first call of GetCertificate method will have no effect. + Client *acme.Client + + // Email optionally specifies a contact email address. + // This is used by CAs, such as Let's Encrypt, to notify about problems + // with issued certificates. + // + // If the Client's account key is already registered, Email is not used. + Email string + + // ForceRSA used to make the Manager generate RSA certificates. It is now ignored. + // + // Deprecated: the Manager will request the correct type of certificate based + // on what each client supports. + ForceRSA bool + + // ExtraExtensions are used when generating a new CSR (Certificate Request), + // thus allowing customization of the resulting certificate. + // For instance, TLS Feature Extension (RFC 7633) can be used + // to prevent an OCSP downgrade attack. + // + // The field value is passed to crypto/x509.CreateCertificateRequest + // in the template's ExtraExtensions field as is. + ExtraExtensions []pkix.Extension + + clientMu sync.Mutex + client *acme.Client // initialized by acmeClient method + + stateMu sync.Mutex + state map[certKey]*certState + + // renewal tracks the set of domains currently running renewal timers. + renewalMu sync.Mutex + renewal map[certKey]*domainRenewal + + // tokensMu guards the rest of the fields: tryHTTP01, certTokens and httpTokens. + tokensMu sync.RWMutex + // tryHTTP01 indicates whether the Manager should try "http-01" challenge type + // during the authorization flow. + tryHTTP01 bool + // httpTokens contains response body values for http-01 challenges + // and is keyed by the URL path at which a challenge response is expected + // to be provisioned. + // The entries are stored for the duration of the authorization flow. + httpTokens map[string][]byte + // certTokens contains temporary certificates for tls-sni and tls-alpn challenges + // and is keyed by token domain name, which matches server name of ClientHello. + // Keys always have ".acme.invalid" suffix for tls-sni. Otherwise, they are domain names + // for tls-alpn. + // The entries are stored for the duration of the authorization flow. + certTokens map[string]*tls.Certificate + // nowFunc, if not nil, returns the current time. This may be set for + // testing purposes. + nowFunc func() time.Time +} + +// certKey is the key by which certificates are tracked in state, renewal and cache. +type certKey struct { + domain string // without trailing dot + isRSA bool // RSA cert for legacy clients (as opposed to default ECDSA) + isToken bool // tls-based challenge token cert; key type is undefined regardless of isRSA +} + +func (c certKey) String() string { + if c.isToken { + return c.domain + "+token" + } + if c.isRSA { + return c.domain + "+rsa" + } + return c.domain +} + +// TLSConfig creates a new TLS config suitable for net/http.Server servers, +// supporting HTTP/2 and the tls-alpn-01 ACME challenge type. 
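+//
+// A typical use, sketched for illustration ("example.com" and the cache
+// directory are placeholders):
+//
+//	m := &Manager{
+//		Prompt:     AcceptTOS,
+//		HostPolicy: HostWhitelist("example.com"),
+//		Cache:      DirCache("/var/cache/autocert"),
+//	}
+//	s := &http.Server{Addr: ":443", TLSConfig: m.TLSConfig()}
+//	s.ListenAndServeTLS("", "")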
+func (m *Manager) TLSConfig() *tls.Config { + return &tls.Config{ + GetCertificate: m.GetCertificate, + NextProtos: []string{ + "h2", "http/1.1", // enable HTTP/2 + acme.ALPNProto, // enable tls-alpn ACME challenges + }, + } +} + +// GetCertificate implements the tls.Config.GetCertificate hook. +// It provides a TLS certificate for hello.ServerName host, including answering +// tls-alpn-01 and *.acme.invalid (tls-sni-01 and tls-sni-02) challenges. +// All other fields of hello are ignored. +// +// If m.HostPolicy is non-nil, GetCertificate calls the policy before requesting +// a new cert. A non-nil error returned from m.HostPolicy halts TLS negotiation. +// The error is propagated back to the caller of GetCertificate and is user-visible. +// This does not affect cached certs. See HostPolicy field description for more details. +// +// If GetCertificate is used directly, instead of via Manager.TLSConfig, package users will +// also have to add acme.ALPNProto to NextProtos for tls-alpn-01, or use HTTPHandler +// for http-01. (The tls-sni-* challenges have been deprecated by popular ACME providers +// due to security issues in the ecosystem.) +func (m *Manager) GetCertificate(hello *tls.ClientHelloInfo) (*tls.Certificate, error) { + if m.Prompt == nil { + return nil, errors.New("acme/autocert: Manager.Prompt not set") + } + + name := hello.ServerName + if name == "" { + return nil, errors.New("acme/autocert: missing server name") + } + if !strings.Contains(strings.Trim(name, "."), ".") { + return nil, errors.New("acme/autocert: server name component count invalid") + } + if strings.ContainsAny(name, `+/\`) { + return nil, errors.New("acme/autocert: server name contains invalid character") + } + + // In the worst-case scenario, the timeout needs to account for caching, host policy, + // domain ownership verification and certificate issuance. + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + // Check whether this is a token cert requested for TLS-SNI or TLS-ALPN challenge. + if wantsTokenCert(hello) { + m.tokensMu.RLock() + defer m.tokensMu.RUnlock() + // It's ok to use the same token cert key for both tls-sni and tls-alpn + // because there's always at most 1 token cert per on-going domain authorization. + // See m.verify for details. + if cert := m.certTokens[name]; cert != nil { + return cert, nil + } + if cert, err := m.cacheGet(ctx, certKey{domain: name, isToken: true}); err == nil { + return cert, nil + } + // TODO: cache error results? + return nil, fmt.Errorf("acme/autocert: no token cert for %q", name) + } + + // regular domain + ck := certKey{ + domain: strings.TrimSuffix(name, "."), // golang.org/issue/18114 + isRSA: !supportsECDSA(hello), + } + cert, err := m.cert(ctx, ck) + if err == nil { + return cert, nil + } + if err != ErrCacheMiss { + return nil, err + } + + // first-time + if err := m.hostPolicy()(ctx, name); err != nil { + return nil, err + } + cert, err = m.createCert(ctx, ck) + if err != nil { + return nil, err + } + m.cachePut(ctx, ck, cert) + return cert, nil +} + +// wantsTokenCert reports whether a TLS request with SNI is made by a CA server +// for a challenge verification. 
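+// A tls-alpn-01 probe advertises only the acme-tls/1 protocol, while a
+// tls-sni probe uses a synthesized server name ending in ".acme.invalid",
+// as checked below.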
+func wantsTokenCert(hello *tls.ClientHelloInfo) bool { + // tls-alpn-01 + if len(hello.SupportedProtos) == 1 && hello.SupportedProtos[0] == acme.ALPNProto { + return true + } + // tls-sni-xx + return strings.HasSuffix(hello.ServerName, ".acme.invalid") +} + +func supportsECDSA(hello *tls.ClientHelloInfo) bool { + // The "signature_algorithms" extension, if present, limits the key exchange + // algorithms allowed by the cipher suites. See RFC 5246, section 7.4.1.4.1. + if hello.SignatureSchemes != nil { + ecdsaOK := false + schemeLoop: + for _, scheme := range hello.SignatureSchemes { + const tlsECDSAWithSHA1 tls.SignatureScheme = 0x0203 // constant added in Go 1.10 + switch scheme { + case tlsECDSAWithSHA1, tls.ECDSAWithP256AndSHA256, + tls.ECDSAWithP384AndSHA384, tls.ECDSAWithP521AndSHA512: + ecdsaOK = true + break schemeLoop + } + } + if !ecdsaOK { + return false + } + } + if hello.SupportedCurves != nil { + ecdsaOK := false + for _, curve := range hello.SupportedCurves { + if curve == tls.CurveP256 { + ecdsaOK = true + break + } + } + if !ecdsaOK { + return false + } + } + for _, suite := range hello.CipherSuites { + switch suite { + case tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: + return true + } + } + return false +} + +// HTTPHandler configures the Manager to provision ACME "http-01" challenge responses. +// It returns an http.Handler that responds to the challenges and must be +// running on port 80. If it receives a request that is not an ACME challenge, +// it delegates the request to the optional fallback handler. +// +// If fallback is nil, the returned handler redirects all GET and HEAD requests +// to the default TLS port 443 with 302 Found status code, preserving the original +// request path and query. It responds with 400 Bad Request to all other HTTP methods. +// The fallback is not protected by the optional HostPolicy. +// +// Because the fallback handler is run with unencrypted port 80 requests, +// the fallback should not serve TLS-only requests. +// +// If HTTPHandler is never called, the Manager will only use the "tls-alpn-01" +// challenge for domain verification. +func (m *Manager) HTTPHandler(fallback http.Handler) http.Handler { + m.tokensMu.Lock() + defer m.tokensMu.Unlock() + m.tryHTTP01 = true + + if fallback == nil { + fallback = http.HandlerFunc(handleHTTPRedirect) + } + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !strings.HasPrefix(r.URL.Path, "/.well-known/acme-challenge/") { + fallback.ServeHTTP(w, r) + return + } + // A reasonable context timeout for cache and host policy only, + // because we don't wait for a new certificate issuance here. 
+ ctx, cancel := context.WithTimeout(r.Context(), time.Minute) + defer cancel() + if err := m.hostPolicy()(ctx, r.Host); err != nil { + http.Error(w, err.Error(), http.StatusForbidden) + return + } + data, err := m.httpToken(ctx, r.URL.Path) + if err != nil { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + w.Write(data) + }) +} + +func handleHTTPRedirect(w http.ResponseWriter, r *http.Request) { + if r.Method != "GET" && r.Method != "HEAD" { + http.Error(w, "Use HTTPS", http.StatusBadRequest) + return + } + target := "https://" + stripPort(r.Host) + r.URL.RequestURI() + http.Redirect(w, r, target, http.StatusFound) +} + +func stripPort(hostport string) string { + host, _, err := net.SplitHostPort(hostport) + if err != nil { + return hostport + } + return net.JoinHostPort(host, "443") +} + +// cert returns an existing certificate either from m.state or cache. +// If a certificate is found in cache but not in m.state, the latter will be filled +// with the cached value. +func (m *Manager) cert(ctx context.Context, ck certKey) (*tls.Certificate, error) { + m.stateMu.Lock() + if s, ok := m.state[ck]; ok { + m.stateMu.Unlock() + s.RLock() + defer s.RUnlock() + return s.tlscert() + } + defer m.stateMu.Unlock() + cert, err := m.cacheGet(ctx, ck) + if err != nil { + return nil, err + } + signer, ok := cert.PrivateKey.(crypto.Signer) + if !ok { + return nil, errors.New("acme/autocert: private key cannot sign") + } + if m.state == nil { + m.state = make(map[certKey]*certState) + } + s := &certState{ + key: signer, + cert: cert.Certificate, + leaf: cert.Leaf, + } + m.state[ck] = s + go m.renew(ck, s.key, s.leaf.NotAfter) + return cert, nil +} + +// cacheGet always returns a valid certificate, or an error otherwise. +// If a cached certificate exists but is not valid, ErrCacheMiss is returned. +func (m *Manager) cacheGet(ctx context.Context, ck certKey) (*tls.Certificate, error) { + if m.Cache == nil { + return nil, ErrCacheMiss + } + data, err := m.Cache.Get(ctx, ck.String()) + if err != nil { + return nil, err + } + + // private + priv, pub := pem.Decode(data) + if priv == nil || !strings.Contains(priv.Type, "PRIVATE") { + return nil, ErrCacheMiss + } + privKey, err := parsePrivateKey(priv.Bytes) + if err != nil { + return nil, err + } + + // public + var pubDER [][]byte + for len(pub) > 0 { + var b *pem.Block + b, pub = pem.Decode(pub) + if b == nil { + break + } + pubDER = append(pubDER, b.Bytes) + } + if len(pub) > 0 { + // Leftover content not consumed by pem.Decode. Corrupt. Ignore. 
+ return nil, ErrCacheMiss + } + + // verify and create TLS cert + leaf, err := validCert(ck, pubDER, privKey, m.now()) + if err != nil { + return nil, ErrCacheMiss + } + tlscert := &tls.Certificate{ + Certificate: pubDER, + PrivateKey: privKey, + Leaf: leaf, + } + return tlscert, nil +} + +func (m *Manager) cachePut(ctx context.Context, ck certKey, tlscert *tls.Certificate) error { + if m.Cache == nil { + return nil + } + + // contains PEM-encoded data + var buf bytes.Buffer + + // private + switch key := tlscert.PrivateKey.(type) { + case *ecdsa.PrivateKey: + if err := encodeECDSAKey(&buf, key); err != nil { + return err + } + case *rsa.PrivateKey: + b := x509.MarshalPKCS1PrivateKey(key) + pb := &pem.Block{Type: "RSA PRIVATE KEY", Bytes: b} + if err := pem.Encode(&buf, pb); err != nil { + return err + } + default: + return errors.New("acme/autocert: unknown private key type") + } + + // public + for _, b := range tlscert.Certificate { + pb := &pem.Block{Type: "CERTIFICATE", Bytes: b} + if err := pem.Encode(&buf, pb); err != nil { + return err + } + } + + return m.Cache.Put(ctx, ck.String(), buf.Bytes()) +} + +func encodeECDSAKey(w io.Writer, key *ecdsa.PrivateKey) error { + b, err := x509.MarshalECPrivateKey(key) + if err != nil { + return err + } + pb := &pem.Block{Type: "EC PRIVATE KEY", Bytes: b} + return pem.Encode(w, pb) +} + +// createCert starts the domain ownership verification and returns a certificate +// for that domain upon success. +// +// If the domain is already being verified, it waits for the existing verification to complete. +// Either way, createCert blocks for the duration of the whole process. +func (m *Manager) createCert(ctx context.Context, ck certKey) (*tls.Certificate, error) { + // TODO: maybe rewrite this whole piece using sync.Once + state, err := m.certState(ck) + if err != nil { + return nil, err + } + // state may exist if another goroutine is already working on it + // in which case just wait for it to finish + if !state.locked { + state.RLock() + defer state.RUnlock() + return state.tlscert() + } + + // We are the first; state is locked. + // Unblock the readers when domain ownership is verified + // and we got the cert or the process failed. + defer state.Unlock() + state.locked = false + + der, leaf, err := m.authorizedCert(ctx, state.key, ck) + if err != nil { + // Remove the failed state after some time, + // making the manager call createCert again on the following TLS hello. + time.AfterFunc(createCertRetryAfter, func() { + defer testDidRemoveState(ck) + m.stateMu.Lock() + defer m.stateMu.Unlock() + // Verify the state hasn't changed and it's still invalid + // before deleting. + s, ok := m.state[ck] + if !ok { + return + } + if _, err := validCert(ck, s.cert, s.key, m.now()); err == nil { + return + } + delete(m.state, ck) + }) + return nil, err + } + state.cert = der + state.leaf = leaf + go m.renew(ck, state.key, state.leaf.NotAfter) + return state.tlscert() +} + +// certState returns a new or existing certState. +// If a new certState is returned, state.exist is false and the state is locked. +// The returned error is non-nil only in the case where a new state could not be created. 
+func (m *Manager) certState(ck certKey) (*certState, error) { + m.stateMu.Lock() + defer m.stateMu.Unlock() + if m.state == nil { + m.state = make(map[certKey]*certState) + } + // existing state + if state, ok := m.state[ck]; ok { + return state, nil + } + + // new locked state + var ( + err error + key crypto.Signer + ) + if ck.isRSA { + key, err = rsa.GenerateKey(rand.Reader, 2048) + } else { + key, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + } + if err != nil { + return nil, err + } + + state := &certState{ + key: key, + locked: true, + } + state.Lock() // will be unlocked by m.certState caller + m.state[ck] = state + return state, nil +} + +// authorizedCert starts the domain ownership verification process and requests a new cert upon success. +// The key argument is the certificate private key. +func (m *Manager) authorizedCert(ctx context.Context, key crypto.Signer, ck certKey) (der [][]byte, leaf *x509.Certificate, err error) { + client, err := m.acmeClient(ctx) + if err != nil { + return nil, nil, err + } + + if err := m.verify(ctx, client, ck.domain); err != nil { + return nil, nil, err + } + csr, err := certRequest(key, ck.domain, m.ExtraExtensions) + if err != nil { + return nil, nil, err + } + der, _, err = client.CreateCert(ctx, csr, 0, true) + if err != nil { + return nil, nil, err + } + leaf, err = validCert(ck, der, key, m.now()) + if err != nil { + return nil, nil, err + } + return der, leaf, nil +} + +// revokePendingAuthz revokes all authorizations idenfied by the elements of uri slice. +// It ignores revocation errors. +func (m *Manager) revokePendingAuthz(ctx context.Context, uri []string) { + client, err := m.acmeClient(ctx) + if err != nil { + return + } + for _, u := range uri { + client.RevokeAuthorization(ctx, u) + } +} + +// verify runs the identifier (domain) authorization flow +// using each applicable ACME challenge type. +func (m *Manager) verify(ctx context.Context, client *acme.Client, domain string) error { + // The list of challenge types we'll try to fulfill + // in this specific order. + challengeTypes := []string{"tls-alpn-01", "tls-sni-02", "tls-sni-01"} + m.tokensMu.RLock() + if m.tryHTTP01 { + challengeTypes = append(challengeTypes, "http-01") + } + m.tokensMu.RUnlock() + + // Keep track of pending authzs and revoke the ones that did not validate. + pendingAuthzs := make(map[string]bool) + defer func() { + var uri []string + for k, pending := range pendingAuthzs { + if pending { + uri = append(uri, k) + } + } + if len(uri) > 0 { + // Use "detached" background context. + // The revocations need not happen in the current verification flow. + go m.revokePendingAuthz(context.Background(), uri) + } + }() + + // errs accumulates challenge failure errors, printed if all fail + errs := make(map[*acme.Challenge]error) + var nextTyp int // challengeType index of the next challenge type to try + for { + // Start domain authorization and get the challenge. + authz, err := client.Authorize(ctx, domain) + if err != nil { + return err + } + // No point in accepting challenges if the authorization status + // is in a final state. + switch authz.Status { + case acme.StatusValid: + return nil // already authorized + case acme.StatusInvalid: + return fmt.Errorf("acme/autocert: invalid authorization %q", authz.URI) + } + + pendingAuthzs[authz.URI] = true + + // Pick the next preferred challenge. 
+ var chal *acme.Challenge + for chal == nil && nextTyp < len(challengeTypes) { + chal = pickChallenge(challengeTypes[nextTyp], authz.Challenges) + nextTyp++ + } + if chal == nil { + errorMsg := fmt.Sprintf("acme/autocert: unable to authorize %q", domain) + for chal, err := range errs { + errorMsg += fmt.Sprintf("; challenge %q failed with error: %v", chal.Type, err) + } + return errors.New(errorMsg) + } + cleanup, err := m.fulfill(ctx, client, chal, domain) + if err != nil { + errs[chal] = err + continue + } + defer cleanup() + if _, err := client.Accept(ctx, chal); err != nil { + errs[chal] = err + continue + } + + // A challenge is fulfilled and accepted: wait for the CA to validate. + if _, err := client.WaitAuthorization(ctx, authz.URI); err != nil { + errs[chal] = err + continue + } + delete(pendingAuthzs, authz.URI) + return nil + } +} + +// fulfill provisions a response to the challenge chal. +// The cleanup is non-nil only if provisioning succeeded. +func (m *Manager) fulfill(ctx context.Context, client *acme.Client, chal *acme.Challenge, domain string) (cleanup func(), err error) { + switch chal.Type { + case "tls-alpn-01": + cert, err := client.TLSALPN01ChallengeCert(chal.Token, domain) + if err != nil { + return nil, err + } + m.putCertToken(ctx, domain, &cert) + return func() { go m.deleteCertToken(domain) }, nil + case "tls-sni-01": + cert, name, err := client.TLSSNI01ChallengeCert(chal.Token) + if err != nil { + return nil, err + } + m.putCertToken(ctx, name, &cert) + return func() { go m.deleteCertToken(name) }, nil + case "tls-sni-02": + cert, name, err := client.TLSSNI02ChallengeCert(chal.Token) + if err != nil { + return nil, err + } + m.putCertToken(ctx, name, &cert) + return func() { go m.deleteCertToken(name) }, nil + case "http-01": + resp, err := client.HTTP01ChallengeResponse(chal.Token) + if err != nil { + return nil, err + } + p := client.HTTP01ChallengePath(chal.Token) + m.putHTTPToken(ctx, p, resp) + return func() { go m.deleteHTTPToken(p) }, nil + } + return nil, fmt.Errorf("acme/autocert: unknown challenge type %q", chal.Type) +} + +func pickChallenge(typ string, chal []*acme.Challenge) *acme.Challenge { + for _, c := range chal { + if c.Type == typ { + return c + } + } + return nil +} + +// putCertToken stores the token certificate with the specified name +// in both m.certTokens map and m.Cache. +func (m *Manager) putCertToken(ctx context.Context, name string, cert *tls.Certificate) { + m.tokensMu.Lock() + defer m.tokensMu.Unlock() + if m.certTokens == nil { + m.certTokens = make(map[string]*tls.Certificate) + } + m.certTokens[name] = cert + m.cachePut(ctx, certKey{domain: name, isToken: true}, cert) +} + +// deleteCertToken removes the token certificate with the specified name +// from both m.certTokens map and m.Cache. +func (m *Manager) deleteCertToken(name string) { + m.tokensMu.Lock() + defer m.tokensMu.Unlock() + delete(m.certTokens, name) + if m.Cache != nil { + ck := certKey{domain: name, isToken: true} + m.Cache.Delete(context.Background(), ck.String()) + } +} + +// httpToken retrieves an existing http-01 token value from an in-memory map +// or the optional cache. 
+func (m *Manager) httpToken(ctx context.Context, tokenPath string) ([]byte, error) { + m.tokensMu.RLock() + defer m.tokensMu.RUnlock() + if v, ok := m.httpTokens[tokenPath]; ok { + return v, nil + } + if m.Cache == nil { + return nil, fmt.Errorf("acme/autocert: no token at %q", tokenPath) + } + return m.Cache.Get(ctx, httpTokenCacheKey(tokenPath)) +} + +// putHTTPToken stores an http-01 token value using tokenPath as key +// in both in-memory map and the optional Cache. +// +// It ignores any error returned from Cache.Put. +func (m *Manager) putHTTPToken(ctx context.Context, tokenPath, val string) { + m.tokensMu.Lock() + defer m.tokensMu.Unlock() + if m.httpTokens == nil { + m.httpTokens = make(map[string][]byte) + } + b := []byte(val) + m.httpTokens[tokenPath] = b + if m.Cache != nil { + m.Cache.Put(ctx, httpTokenCacheKey(tokenPath), b) + } +} + +// deleteHTTPToken removes an http-01 token value from both in-memory map +// and the optional Cache, ignoring any error returned from the latter. +// +// If m.Cache is non-nil, it blocks until Cache.Delete returns without a timeout. +func (m *Manager) deleteHTTPToken(tokenPath string) { + m.tokensMu.Lock() + defer m.tokensMu.Unlock() + delete(m.httpTokens, tokenPath) + if m.Cache != nil { + m.Cache.Delete(context.Background(), httpTokenCacheKey(tokenPath)) + } +} + +// httpTokenCacheKey returns a key at which an http-01 token value may be stored +// in the Manager's optional Cache. +func httpTokenCacheKey(tokenPath string) string { + return path.Base(tokenPath) + "+http-01" +} + +// renew starts a cert renewal timer loop, one per domain. +// +// The loop is scheduled in two cases: +// - a cert was fetched from cache for the first time (wasn't in m.state) +// - a new cert was created by m.createCert +// +// The key argument is a certificate private key. +// The exp argument is the cert expiration time (NotAfter). +func (m *Manager) renew(ck certKey, key crypto.Signer, exp time.Time) { + m.renewalMu.Lock() + defer m.renewalMu.Unlock() + if m.renewal[ck] != nil { + // another goroutine is already on it + return + } + if m.renewal == nil { + m.renewal = make(map[certKey]*domainRenewal) + } + dr := &domainRenewal{m: m, ck: ck, key: key} + m.renewal[ck] = dr + dr.start(exp) +} + +// stopRenew stops all currently running cert renewal timers. +// The timers are not restarted during the lifetime of the Manager. +func (m *Manager) stopRenew() { + m.renewalMu.Lock() + defer m.renewalMu.Unlock() + for name, dr := range m.renewal { + delete(m.renewal, name) + dr.stop() + } +} + +func (m *Manager) accountKey(ctx context.Context) (crypto.Signer, error) { + const keyName = "acme_account+key" + + // Previous versions of autocert stored the value under a different key. 
+ const legacyKeyName = "acme_account.key" + + genKey := func() (*ecdsa.PrivateKey, error) { + return ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + } + + if m.Cache == nil { + return genKey() + } + + data, err := m.Cache.Get(ctx, keyName) + if err == ErrCacheMiss { + data, err = m.Cache.Get(ctx, legacyKeyName) + } + if err == ErrCacheMiss { + key, err := genKey() + if err != nil { + return nil, err + } + var buf bytes.Buffer + if err := encodeECDSAKey(&buf, key); err != nil { + return nil, err + } + if err := m.Cache.Put(ctx, keyName, buf.Bytes()); err != nil { + return nil, err + } + return key, nil + } + if err != nil { + return nil, err + } + + priv, _ := pem.Decode(data) + if priv == nil || !strings.Contains(priv.Type, "PRIVATE") { + return nil, errors.New("acme/autocert: invalid account key found in cache") + } + return parsePrivateKey(priv.Bytes) +} + +func (m *Manager) acmeClient(ctx context.Context) (*acme.Client, error) { + m.clientMu.Lock() + defer m.clientMu.Unlock() + if m.client != nil { + return m.client, nil + } + + client := m.Client + if client == nil { + client = &acme.Client{DirectoryURL: acme.LetsEncryptURL} + } + if client.Key == nil { + var err error + client.Key, err = m.accountKey(ctx) + if err != nil { + return nil, err + } + } + var contact []string + if m.Email != "" { + contact = []string{"mailto:" + m.Email} + } + a := &acme.Account{Contact: contact} + _, err := client.Register(ctx, a, m.Prompt) + if ae, ok := err.(*acme.Error); err == nil || ok && ae.StatusCode == http.StatusConflict { + // conflict indicates the key is already registered + m.client = client + err = nil + } + return m.client, err +} + +func (m *Manager) hostPolicy() HostPolicy { + if m.HostPolicy != nil { + return m.HostPolicy + } + return defaultHostPolicy +} + +func (m *Manager) renewBefore() time.Duration { + if m.RenewBefore > renewJitter { + return m.RenewBefore + } + return 720 * time.Hour // 30 days +} + +func (m *Manager) now() time.Time { + if m.nowFunc != nil { + return m.nowFunc() + } + return time.Now() +} + +// certState is ready when its mutex is unlocked for reading. +type certState struct { + sync.RWMutex + locked bool // locked for read/write + key crypto.Signer // private key for cert + cert [][]byte // DER encoding + leaf *x509.Certificate // parsed cert[0]; always non-nil if cert != nil +} + +// tlscert creates a tls.Certificate from s.key and s.cert. +// Callers should wrap it in s.RLock() and s.RUnlock(). +func (s *certState) tlscert() (*tls.Certificate, error) { + if s.key == nil { + return nil, errors.New("acme/autocert: missing signer") + } + if len(s.cert) == 0 { + return nil, errors.New("acme/autocert: missing certificate") + } + return &tls.Certificate{ + PrivateKey: s.key, + Certificate: s.cert, + Leaf: s.leaf, + }, nil +} + +// certRequest generates a CSR for the given common name cn and optional SANs. +func certRequest(key crypto.Signer, cn string, ext []pkix.Extension, san ...string) ([]byte, error) { + req := &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: cn}, + DNSNames: san, + ExtraExtensions: ext, + } + return x509.CreateCertificateRequest(rand.Reader, req, key) +} + +// Attempt to parse the given private key DER block. OpenSSL 0.9.8 generates +// PKCS#1 private keys by default, while OpenSSL 1.0.0 generates PKCS#8 keys. +// OpenSSL ecparam generates SEC1 EC private keys for ECDSA. We try all three. +// +// Inspired by parsePrivateKey in crypto/tls/tls.go. 
+func parsePrivateKey(der []byte) (crypto.Signer, error) { + if key, err := x509.ParsePKCS1PrivateKey(der); err == nil { + return key, nil + } + if key, err := x509.ParsePKCS8PrivateKey(der); err == nil { + switch key := key.(type) { + case *rsa.PrivateKey: + return key, nil + case *ecdsa.PrivateKey: + return key, nil + default: + return nil, errors.New("acme/autocert: unknown private key type in PKCS#8 wrapping") + } + } + if key, err := x509.ParseECPrivateKey(der); err == nil { + return key, nil + } + + return nil, errors.New("acme/autocert: failed to parse private key") +} + +// validCert parses a cert chain provided as der argument and verifies the leaf and der[0] +// correspond to the private key, the domain and key type match, and expiration dates +// are valid. It doesn't do any revocation checking. +// +// The returned value is the verified leaf cert. +func validCert(ck certKey, der [][]byte, key crypto.Signer, now time.Time) (leaf *x509.Certificate, err error) { + // parse public part(s) + var n int + for _, b := range der { + n += len(b) + } + pub := make([]byte, n) + n = 0 + for _, b := range der { + n += copy(pub[n:], b) + } + x509Cert, err := x509.ParseCertificates(pub) + if err != nil || len(x509Cert) == 0 { + return nil, errors.New("acme/autocert: no public key found") + } + // verify the leaf is not expired and matches the domain name + leaf = x509Cert[0] + if now.Before(leaf.NotBefore) { + return nil, errors.New("acme/autocert: certificate is not valid yet") + } + if now.After(leaf.NotAfter) { + return nil, errors.New("acme/autocert: expired certificate") + } + if err := leaf.VerifyHostname(ck.domain); err != nil { + return nil, err + } + // ensure the leaf corresponds to the private key and matches the certKey type + switch pub := leaf.PublicKey.(type) { + case *rsa.PublicKey: + prv, ok := key.(*rsa.PrivateKey) + if !ok { + return nil, errors.New("acme/autocert: private key type does not match public key type") + } + if pub.N.Cmp(prv.N) != 0 { + return nil, errors.New("acme/autocert: private key does not match public key") + } + if !ck.isRSA && !ck.isToken { + return nil, errors.New("acme/autocert: key type does not match expected value") + } + case *ecdsa.PublicKey: + prv, ok := key.(*ecdsa.PrivateKey) + if !ok { + return nil, errors.New("acme/autocert: private key type does not match public key type") + } + if pub.X.Cmp(prv.X) != 0 || pub.Y.Cmp(prv.Y) != 0 { + return nil, errors.New("acme/autocert: private key does not match public key") + } + if ck.isRSA && !ck.isToken { + return nil, errors.New("acme/autocert: key type does not match expected value") + } + default: + return nil, errors.New("acme/autocert: unknown public key algorithm") + } + return leaf, nil +} + +type lockedMathRand struct { + sync.Mutex + rnd *mathrand.Rand +} + +func (r *lockedMathRand) int63n(max int64) int64 { + r.Lock() + n := r.rnd.Int63n(max) + r.Unlock() + return n +} + +// For easier testing. +var ( + // Called when a state is removed. + testDidRemoveState = func(certKey) {} +) diff --git a/vendor/golang.org/x/crypto/acme/autocert/cache.go b/vendor/golang.org/x/crypto/acme/autocert/cache.go new file mode 100644 index 00000000..aa9aa845 --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/autocert/cache.go @@ -0,0 +1,130 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package autocert + +import ( + "context" + "errors" + "io/ioutil" + "os" + "path/filepath" +) + +// ErrCacheMiss is returned when a certificate is not found in cache. +var ErrCacheMiss = errors.New("acme/autocert: certificate cache miss") + +// Cache is used by Manager to store and retrieve previously obtained certificates +// and other account data as opaque blobs. +// +// Cache implementations should not rely on the key naming pattern. Keys can +// include any printable ASCII characters, except the following: \/:*?"<>| +type Cache interface { + // Get returns a certificate data for the specified key. + // If there's no such key, Get returns ErrCacheMiss. + Get(ctx context.Context, key string) ([]byte, error) + + // Put stores the data in the cache under the specified key. + // Underlying implementations may use any data storage format, + // as long as the reverse operation, Get, results in the original data. + Put(ctx context.Context, key string, data []byte) error + + // Delete removes a certificate data from the cache under the specified key. + // If there's no such key in the cache, Delete returns nil. + Delete(ctx context.Context, key string) error +} + +// DirCache implements Cache using a directory on the local filesystem. +// If the directory does not exist, it will be created with 0700 permissions. +type DirCache string + +// Get reads a certificate data from the specified file name. +func (d DirCache) Get(ctx context.Context, name string) ([]byte, error) { + name = filepath.Join(string(d), name) + var ( + data []byte + err error + done = make(chan struct{}) + ) + go func() { + data, err = ioutil.ReadFile(name) + close(done) + }() + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-done: + } + if os.IsNotExist(err) { + return nil, ErrCacheMiss + } + return data, err +} + +// Put writes the certificate data to the specified file name. +// The file will be created with 0600 permissions. +func (d DirCache) Put(ctx context.Context, name string, data []byte) error { + if err := os.MkdirAll(string(d), 0700); err != nil { + return err + } + + done := make(chan struct{}) + var err error + go func() { + defer close(done) + var tmp string + if tmp, err = d.writeTempFile(name, data); err != nil { + return + } + select { + case <-ctx.Done(): + // Don't overwrite the file if the context was canceled. + default: + newName := filepath.Join(string(d), name) + err = os.Rename(tmp, newName) + } + }() + select { + case <-ctx.Done(): + return ctx.Err() + case <-done: + } + return err +} + +// Delete removes the specified file name. +func (d DirCache) Delete(ctx context.Context, name string) error { + name = filepath.Join(string(d), name) + var ( + err error + done = make(chan struct{}) + ) + go func() { + err = os.Remove(name) + close(done) + }() + select { + case <-ctx.Done(): + return ctx.Err() + case <-done: + } + if err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// writeTempFile writes b to a temporary file, closes the file and returns its path. 
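The Cache interface above is deliberately small, so a test double is only a few lines. A sketch of an in-memory implementation (the memCache name is made up here; only the three methods matter):

package cachetest

import (
	"context"
	"errors"
	"sync"
)

// errCacheMiss stands in for autocert.ErrCacheMiss in this sketch.
var errCacheMiss = errors.New("cache miss")

// memCache is a hypothetical in-memory Cache for tests.
type memCache struct {
	mu sync.Mutex
	m  map[string][]byte
}

func (c *memCache) Get(ctx context.Context, key string) ([]byte, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if b, ok := c.m[key]; ok {
		return append([]byte(nil), b...), nil // copy so callers cannot mutate the cache
	}
	return nil, errCacheMiss
}

func (c *memCache) Put(ctx context.Context, key string, data []byte) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.m == nil {
		c.m = make(map[string][]byte)
	}
	c.m[key] = append([]byte(nil), data...)
	return nil
}

func (c *memCache) Delete(ctx context.Context, key string) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	delete(c.m, key)
	return nil
}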
+func (d DirCache) writeTempFile(prefix string, b []byte) (string, error) { + // TempFile uses 0600 permissions + f, err := ioutil.TempFile(string(d), prefix) + if err != nil { + return "", err + } + if _, err := f.Write(b); err != nil { + f.Close() + return "", err + } + return f.Name(), f.Close() +} diff --git a/vendor/golang.org/x/crypto/acme/autocert/listener.go b/vendor/golang.org/x/crypto/acme/autocert/listener.go new file mode 100644 index 00000000..1e069818 --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/autocert/listener.go @@ -0,0 +1,157 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package autocert + +import ( + "crypto/tls" + "log" + "net" + "os" + "path/filepath" + "runtime" + "time" +) + +// NewListener returns a net.Listener that listens on the standard TLS +// port (443) on all interfaces and returns *tls.Conn connections with +// LetsEncrypt certificates for the provided domain or domains. +// +// It enables one-line HTTPS servers: +// +// log.Fatal(http.Serve(autocert.NewListener("example.com"), handler)) +// +// NewListener is a convenience function for a common configuration. +// More complex or custom configurations can use the autocert.Manager +// type instead. +// +// Use of this function implies acceptance of the LetsEncrypt Terms of +// Service. If domains is not empty, the provided domains are passed +// to HostWhitelist. If domains is empty, the listener will do +// LetsEncrypt challenges for any requested domain, which is not +// recommended. +// +// Certificates are cached in a "golang-autocert" directory under an +// operating system-specific cache or temp directory. This may not +// be suitable for servers spanning multiple machines. +// +// The returned listener uses a *tls.Config that enables HTTP/2, and +// should only be used with servers that support HTTP/2. +// +// The returned Listener also enables TCP keep-alives on the accepted +// connections. The returned *tls.Conn are returned before their TLS +// handshake has completed. +func NewListener(domains ...string) net.Listener { + m := &Manager{ + Prompt: AcceptTOS, + } + if len(domains) > 0 { + m.HostPolicy = HostWhitelist(domains...) + } + dir := cacheDir() + if err := os.MkdirAll(dir, 0700); err != nil { + log.Printf("warning: autocert.NewListener not using a cache: %v", err) + } else { + m.Cache = DirCache(dir) + } + return m.Listener() +} + +// Listener listens on the standard TLS port (443) on all interfaces +// and returns a net.Listener returning *tls.Conn connections. +// +// The returned listener uses a *tls.Config that enables HTTP/2, and +// should only be used with servers that support HTTP/2. +// +// The returned Listener also enables TCP keep-alives on the accepted +// connections. The returned *tls.Conn are returned before their TLS +// handshake has completed. +// +// Unlike NewListener, it is the caller's responsibility to initialize +// the Manager m's Prompt, Cache, HostPolicy, and other desired options. 
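As the NewListener comment notes, anything beyond the one-liner configures a Manager directly. A sketch of the manual wiring using the fields shown in this file (the domain and cache directory are placeholders):

package main

import (
	"log"
	"net/http"

	"golang.org/x/crypto/acme/autocert"
)

func main() {
	m := &autocert.Manager{
		Prompt:     autocert.AcceptTOS,
		HostPolicy: autocert.HostWhitelist("example.com"),
		Cache:      autocert.DirCache("certs"), // persist certs across restarts
	}
	s := &http.Server{
		Addr:      ":443",
		TLSConfig: m.TLSConfig(), // the same config Listener() uses below
	}
	// Empty cert/key paths: certificates come from the Manager.
	log.Fatal(s.ListenAndServeTLS("", ""))
}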
+func (m *Manager) Listener() net.Listener { + ln := &listener{ + m: m, + conf: m.TLSConfig(), + } + ln.tcpListener, ln.tcpListenErr = net.Listen("tcp", ":443") + return ln +} + +type listener struct { + m *Manager + conf *tls.Config + + tcpListener net.Listener + tcpListenErr error +} + +func (ln *listener) Accept() (net.Conn, error) { + if ln.tcpListenErr != nil { + return nil, ln.tcpListenErr + } + conn, err := ln.tcpListener.Accept() + if err != nil { + return nil, err + } + tcpConn := conn.(*net.TCPConn) + + // Because Listener is a convenience function, help out with + // this too. This is not possible for the caller to set once + // we return a *tcp.Conn wrapping an inaccessible net.Conn. + // If callers don't want this, they can do things the manual + // way and tweak as needed. But this is what net/http does + // itself, so copy that. If net/http changes, we can change + // here too. + tcpConn.SetKeepAlive(true) + tcpConn.SetKeepAlivePeriod(3 * time.Minute) + + return tls.Server(tcpConn, ln.conf), nil +} + +func (ln *listener) Addr() net.Addr { + if ln.tcpListener != nil { + return ln.tcpListener.Addr() + } + // net.Listen failed. Return something non-nil in case callers + // call Addr before Accept: + return &net.TCPAddr{IP: net.IP{0, 0, 0, 0}, Port: 443} +} + +func (ln *listener) Close() error { + if ln.tcpListenErr != nil { + return ln.tcpListenErr + } + return ln.tcpListener.Close() +} + +func homeDir() string { + if runtime.GOOS == "windows" { + return os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH") + } + if h := os.Getenv("HOME"); h != "" { + return h + } + return "/" +} + +func cacheDir() string { + const base = "golang-autocert" + switch runtime.GOOS { + case "darwin": + return filepath.Join(homeDir(), "Library", "Caches", base) + case "windows": + for _, ev := range []string{"APPDATA", "CSIDL_APPDATA", "TEMP", "TMP"} { + if v := os.Getenv(ev); v != "" { + return filepath.Join(v, base) + } + } + // Worst case: + return filepath.Join(homeDir(), base) + } + if xdg := os.Getenv("XDG_CACHE_HOME"); xdg != "" { + return filepath.Join(xdg, base) + } + return filepath.Join(homeDir(), ".cache", base) +} diff --git a/vendor/golang.org/x/crypto/acme/autocert/renewal.go b/vendor/golang.org/x/crypto/acme/autocert/renewal.go new file mode 100644 index 00000000..665f870d --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/autocert/renewal.go @@ -0,0 +1,141 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package autocert + +import ( + "context" + "crypto" + "sync" + "time" +) + +// renewJitter is the maximum deviation from Manager.RenewBefore. +const renewJitter = time.Hour + +// domainRenewal tracks the state used by the periodic timers +// renewing a single domain's cert. +type domainRenewal struct { + m *Manager + ck certKey + key crypto.Signer + + timerMu sync.Mutex + timer *time.Timer +} + +// start starts a cert renewal timer at the time +// defined by the certificate expiration time exp. +// +// If the timer is already started, calling start is a noop. +func (dr *domainRenewal) start(exp time.Time) { + dr.timerMu.Lock() + defer dr.timerMu.Unlock() + if dr.timer != nil { + return + } + dr.timer = time.AfterFunc(dr.next(exp), dr.renew) +} + +// stop stops the cert renewal timer. +// If the timer is already stopped, calling stop is a noop. 
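renewJitter above randomizes renewal times so a large set of domains does not hit the CA at the same instant. A sketch of the resulting deadline computation (it mirrors domainRenewal.next further down, with math/rand standing in for the package's locked PRNG):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// nextRenewal: renew RenewBefore ahead of expiry, minus up to renewJitter
// of randomness, clamped at zero.
func nextRenewal(expiry, now time.Time, renewBefore, renewJitter time.Duration) time.Duration {
	d := expiry.Sub(now) - renewBefore
	d -= time.Duration(rand.Int63n(int64(renewJitter)))
	if d < 0 {
		return 0
	}
	return d
}

func main() {
	expiry := time.Now().Add(90 * 24 * time.Hour) // a typical 90-day certificate
	fmt.Println(nextRenewal(expiry, time.Now(), 720*time.Hour, time.Hour))
}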
+func (dr *domainRenewal) stop() { + dr.timerMu.Lock() + defer dr.timerMu.Unlock() + if dr.timer == nil { + return + } + dr.timer.Stop() + dr.timer = nil +} + +// renew is called periodically by a timer. +// The first renew call is kicked off by dr.start. +func (dr *domainRenewal) renew() { + dr.timerMu.Lock() + defer dr.timerMu.Unlock() + if dr.timer == nil { + return + } + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer cancel() + // TODO: rotate dr.key at some point? + next, err := dr.do(ctx) + if err != nil { + next = renewJitter / 2 + next += time.Duration(pseudoRand.int63n(int64(next))) + } + dr.timer = time.AfterFunc(next, dr.renew) + testDidRenewLoop(next, err) +} + +// updateState locks and replaces the relevant Manager.state item with the given +// state. It additionally updates dr.key with the given state's key. +func (dr *domainRenewal) updateState(state *certState) { + dr.m.stateMu.Lock() + defer dr.m.stateMu.Unlock() + dr.key = state.key + dr.m.state[dr.ck] = state +} + +// do is similar to Manager.createCert but it doesn't lock a Manager.state item. +// Instead, it requests a new certificate independently and, upon success, +// replaces dr.m.state item with a new one and updates cache for the given domain. +// +// It may lock and update the Manager.state if the expiration date of the currently +// cached cert is far enough in the future. +// +// The returned value is a time interval after which the renewal should occur again. +func (dr *domainRenewal) do(ctx context.Context) (time.Duration, error) { + // a race is likely unavoidable in a distributed environment + // but we try nonetheless + if tlscert, err := dr.m.cacheGet(ctx, dr.ck); err == nil { + next := dr.next(tlscert.Leaf.NotAfter) + if next > dr.m.renewBefore()+renewJitter { + signer, ok := tlscert.PrivateKey.(crypto.Signer) + if ok { + state := &certState{ + key: signer, + cert: tlscert.Certificate, + leaf: tlscert.Leaf, + } + dr.updateState(state) + return next, nil + } + } + } + + der, leaf, err := dr.m.authorizedCert(ctx, dr.key, dr.ck) + if err != nil { + return 0, err + } + state := &certState{ + key: dr.key, + cert: der, + leaf: leaf, + } + tlscert, err := state.tlscert() + if err != nil { + return 0, err + } + if err := dr.m.cachePut(ctx, dr.ck, tlscert); err != nil { + return 0, err + } + dr.updateState(state) + return dr.next(leaf.NotAfter), nil +} + +func (dr *domainRenewal) next(expiry time.Time) time.Duration { + d := expiry.Sub(dr.m.now()) - dr.m.renewBefore() + // add a bit of randomness to renew deadline + n := pseudoRand.int63n(int64(renewJitter)) + d -= time.Duration(n) + if d < 0 { + return 0 + } + return d +} + +var testDidRenewLoop = func(next time.Duration, err error) {} diff --git a/vendor/golang.org/x/crypto/acme/http.go b/vendor/golang.org/x/crypto/acme/http.go new file mode 100644 index 00000000..a43ce6a5 --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/http.go @@ -0,0 +1,281 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package acme + +import ( + "bytes" + "context" + "crypto" + "crypto/rand" + "encoding/json" + "fmt" + "io/ioutil" + "math/big" + "net/http" + "strconv" + "strings" + "time" +) + +// retryTimer encapsulates common logic for retrying unsuccessful requests. +// It is not safe for concurrent use. +type retryTimer struct { + // backoffFn provides backoff delay sequence for retries. + // See Client.RetryBackoff doc comment. 
+	backoffFn func(n int, r *http.Request, res *http.Response) time.Duration
+	// n is the current retry attempt.
+	n int
+}
+
+func (t *retryTimer) inc() {
+	t.n++
+}
+
+// backoff pauses the current goroutine as described in Client.RetryBackoff.
+func (t *retryTimer) backoff(ctx context.Context, r *http.Request, res *http.Response) error {
+	d := t.backoffFn(t.n, r, res)
+	if d <= 0 {
+		return fmt.Errorf("acme: no more retries for %s; tried %d time(s)", r.URL, t.n)
+	}
+	wakeup := time.NewTimer(d)
+	defer wakeup.Stop()
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-wakeup.C:
+		return nil
+	}
+}
+
+func (c *Client) retryTimer() *retryTimer {
+	f := c.RetryBackoff
+	if f == nil {
+		f = defaultBackoff
+	}
+	return &retryTimer{backoffFn: f}
+}
+
+// defaultBackoff provides default Client.RetryBackoff implementation
+// using a truncated exponential backoff algorithm,
+// as described in Client.RetryBackoff.
+//
+// The n argument is always bounded between 1 and 30.
+// The returned value is always greater than 0.
+func defaultBackoff(n int, r *http.Request, res *http.Response) time.Duration {
+	const max = 10 * time.Second
+	var jitter time.Duration
+	if x, err := rand.Int(rand.Reader, big.NewInt(1000)); err == nil {
+		// Set the minimum to 1ms to avoid a case where
+		// an invalid Retry-After value is parsed into 0 below,
+		// resulting in the 0 returned value which would unintentionally
+		// stop the retries.
+		jitter = (1 + time.Duration(x.Int64())) * time.Millisecond
+	}
+	if v, ok := res.Header["Retry-After"]; ok {
+		return retryAfter(v[0]) + jitter
+	}
+
+	if n < 1 {
+		n = 1
+	}
+	if n > 30 {
+		n = 30
+	}
+	d := time.Duration(1<<uint(n-1))*time.Second + jitter
+	if d > max {
+		return max
+	}
+	return d
+}
+
+// retryAfter parses a Retry-After HTTP header value,
+// trying to convert v into an int (seconds) or use http.ParseTime otherwise.
+// It returns zero value if v cannot be parsed.
+func retryAfter(v string) time.Duration {
+	if i, err := strconv.Atoi(v); err == nil {
+		return time.Duration(i) * time.Second
+	}
+	t, err := http.ParseTime(v)
+	if err != nil {
+		return 0
+	}
+	return t.Sub(timeNow())
+}
+
+// resOkay is a function that reports whether the provided response is okay.
+// It is expected to keep the response body unread.
+type resOkay func(*http.Response) bool
+
+// wantStatus returns a function which reports whether the code
+// matches the status code of a response.
+func wantStatus(codes ...int) resOkay {
+	return func(res *http.Response) bool {
+		for _, code := range codes {
+			if code == res.StatusCode {
+				return true
+			}
+		}
+		return false
+	}
+}
+
+// get issues an unsigned GET request to the specified URL.
+// It returns a non-error value only when ok reports true.
+//
+// get retries unsuccessful attempts according to c.RetryBackoff
+// until the context is done or a non-retriable error is received.
+func (c *Client) get(ctx context.Context, url string, ok resOkay) (*http.Response, error) {
+	retry := c.retryTimer()
+	for {
+		req, err := http.NewRequest("GET", url, nil)
+		if err != nil {
+			return nil, err
+		}
+		res, err := c.doNoRetry(ctx, req)
+		switch {
+		case err != nil:
+			return nil, err
+		case ok(res):
+			return res, nil
+		case isRetriable(res.StatusCode):
+			retry.inc()
+			resErr := responseError(res)
+			res.Body.Close()
+			// Ignore the error value from retry.backoff
+			// and return the one from last retry, as received from the CA.
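For intuition, the schedule defaultBackoff produces is roughly 1s, 2s, 4s, 8s, then capped at 10s, plus up to a second of jitter. A stripped-down sketch of the same curve, without the jitter and Retry-After branches:

package main

import (
	"fmt"
	"time"
)

func backoff(n int) time.Duration {
	const max = 10 * time.Second
	if n < 1 {
		n = 1
	}
	if n > 30 {
		n = 30
	}
	d := time.Duration(1<<uint(n-1)) * time.Second // 2^(n-1) seconds
	if d > max {
		return max
	}
	return d
}

func main() {
	for n := 1; n <= 6; n++ {
		fmt.Println(n, backoff(n)) // 1s 2s 4s 8s 10s 10s
	}
}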
+ if retry.backoff(ctx, req, res) != nil { + return nil, resErr + } + default: + defer res.Body.Close() + return nil, responseError(res) + } + } +} + +// post issues a signed POST request in JWS format using the provided key +// to the specified URL. +// It returns a non-error value only when ok reports true. +// +// post retries unsuccessful attempts according to c.RetryBackoff +// until the context is done or a non-retriable error is received. +// It uses postNoRetry to make individual requests. +func (c *Client) post(ctx context.Context, key crypto.Signer, url string, body interface{}, ok resOkay) (*http.Response, error) { + retry := c.retryTimer() + for { + res, req, err := c.postNoRetry(ctx, key, url, body) + if err != nil { + return nil, err + } + if ok(res) { + return res, nil + } + resErr := responseError(res) + res.Body.Close() + switch { + // Check for bad nonce before isRetriable because it may have been returned + // with an unretriable response code such as 400 Bad Request. + case isBadNonce(resErr): + // Consider any previously stored nonce values to be invalid. + c.clearNonces() + case !isRetriable(res.StatusCode): + return nil, resErr + } + retry.inc() + // Ignore the error value from retry.backoff + // and return the one from last retry, as received from the CA. + if err := retry.backoff(ctx, req, res); err != nil { + return nil, resErr + } + } +} + +// postNoRetry signs the body with the given key and POSTs it to the provided url. +// The body argument must be JSON-serializable. +// It is used by c.post to retry unsuccessful attempts. +func (c *Client) postNoRetry(ctx context.Context, key crypto.Signer, url string, body interface{}) (*http.Response, *http.Request, error) { + nonce, err := c.popNonce(ctx, url) + if err != nil { + return nil, nil, err + } + b, err := jwsEncodeJSON(body, key, nonce) + if err != nil { + return nil, nil, err + } + req, err := http.NewRequest("POST", url, bytes.NewReader(b)) + if err != nil { + return nil, nil, err + } + req.Header.Set("Content-Type", "application/jose+json") + res, err := c.doNoRetry(ctx, req) + if err != nil { + return nil, nil, err + } + c.addNonce(res.Header) + return res, req, nil +} + +// doNoRetry issues a request req, replacing its context (if any) with ctx. +func (c *Client) doNoRetry(ctx context.Context, req *http.Request) (*http.Response, error) { + res, err := c.httpClient().Do(req.WithContext(ctx)) + if err != nil { + select { + case <-ctx.Done(): + // Prefer the unadorned context error. + // (The acme package had tests assuming this, previously from ctxhttp's + // behavior, predating net/http supporting contexts natively) + // TODO(bradfitz): reconsider this in the future. But for now this + // requires no test updates. + return nil, ctx.Err() + default: + return nil, err + } + } + return res, nil +} + +func (c *Client) httpClient() *http.Client { + if c.HTTPClient != nil { + return c.HTTPClient + } + return http.DefaultClient +} + +// isBadNonce reports whether err is an ACME "badnonce" error. +func isBadNonce(err error) bool { + // According to the spec badNonce is urn:ietf:params:acme:error:badNonce. + // However, ACME servers in the wild return their versions of the error. + // See https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-5.4 + // and https://github.com/letsencrypt/boulder/blob/0e07eacb/docs/acme-divergences.md#section-66. 
+ ae, ok := err.(*Error) + return ok && strings.HasSuffix(strings.ToLower(ae.ProblemType), ":badnonce") +} + +// isRetriable reports whether a request can be retried +// based on the response status code. +// +// Note that a "bad nonce" error is returned with a non-retriable 400 Bad Request code. +// Callers should parse the response and check with isBadNonce. +func isRetriable(code int) bool { + return code <= 399 || code >= 500 || code == http.StatusTooManyRequests +} + +// responseError creates an error of Error type from resp. +func responseError(resp *http.Response) error { + // don't care if ReadAll returns an error: + // json.Unmarshal will fail in that case anyway + b, _ := ioutil.ReadAll(resp.Body) + e := &wireError{Status: resp.StatusCode} + if err := json.Unmarshal(b, e); err != nil { + // this is not a regular error response: + // populate detail with anything we received, + // e.Status will already contain HTTP response code value + e.Detail = string(b) + if e.Detail == "" { + e.Detail = resp.Status + } + } + return e.error(resp.Header) +} diff --git a/vendor/golang.org/x/crypto/acme/jws.go b/vendor/golang.org/x/crypto/acme/jws.go new file mode 100644 index 00000000..6cbca25d --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/jws.go @@ -0,0 +1,153 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package acme + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + _ "crypto/sha512" // need for EC keys + "encoding/base64" + "encoding/json" + "fmt" + "math/big" +) + +// jwsEncodeJSON signs claimset using provided key and a nonce. +// The result is serialized in JSON format. +// See https://tools.ietf.org/html/rfc7515#section-7. +func jwsEncodeJSON(claimset interface{}, key crypto.Signer, nonce string) ([]byte, error) { + jwk, err := jwkEncode(key.Public()) + if err != nil { + return nil, err + } + alg, sha := jwsHasher(key) + if alg == "" || !sha.Available() { + return nil, ErrUnsupportedKey + } + phead := fmt.Sprintf(`{"alg":%q,"jwk":%s,"nonce":%q}`, alg, jwk, nonce) + phead = base64.RawURLEncoding.EncodeToString([]byte(phead)) + cs, err := json.Marshal(claimset) + if err != nil { + return nil, err + } + payload := base64.RawURLEncoding.EncodeToString(cs) + hash := sha.New() + hash.Write([]byte(phead + "." + payload)) + sig, err := jwsSign(key, sha, hash.Sum(nil)) + if err != nil { + return nil, err + } + + enc := struct { + Protected string `json:"protected"` + Payload string `json:"payload"` + Sig string `json:"signature"` + }{ + Protected: phead, + Payload: payload, + Sig: base64.RawURLEncoding.EncodeToString(sig), + } + return json.Marshal(&enc) +} + +// jwkEncode encodes public part of an RSA or ECDSA key into a JWK. +// The result is also suitable for creating a JWK thumbprint. +// https://tools.ietf.org/html/rfc7517 +func jwkEncode(pub crypto.PublicKey) (string, error) { + switch pub := pub.(type) { + case *rsa.PublicKey: + // https://tools.ietf.org/html/rfc7518#section-6.3.1 + n := pub.N + e := big.NewInt(int64(pub.E)) + // Field order is important. + // See https://tools.ietf.org/html/rfc7638#section-3.3 for details. 
+ return fmt.Sprintf(`{"e":"%s","kty":"RSA","n":"%s"}`, + base64.RawURLEncoding.EncodeToString(e.Bytes()), + base64.RawURLEncoding.EncodeToString(n.Bytes()), + ), nil + case *ecdsa.PublicKey: + // https://tools.ietf.org/html/rfc7518#section-6.2.1 + p := pub.Curve.Params() + n := p.BitSize / 8 + if p.BitSize%8 != 0 { + n++ + } + x := pub.X.Bytes() + if n > len(x) { + x = append(make([]byte, n-len(x)), x...) + } + y := pub.Y.Bytes() + if n > len(y) { + y = append(make([]byte, n-len(y)), y...) + } + // Field order is important. + // See https://tools.ietf.org/html/rfc7638#section-3.3 for details. + return fmt.Sprintf(`{"crv":"%s","kty":"EC","x":"%s","y":"%s"}`, + p.Name, + base64.RawURLEncoding.EncodeToString(x), + base64.RawURLEncoding.EncodeToString(y), + ), nil + } + return "", ErrUnsupportedKey +} + +// jwsSign signs the digest using the given key. +// It returns ErrUnsupportedKey if the key type is unknown. +// The hash is used only for RSA keys. +func jwsSign(key crypto.Signer, hash crypto.Hash, digest []byte) ([]byte, error) { + switch key := key.(type) { + case *rsa.PrivateKey: + return key.Sign(rand.Reader, digest, hash) + case *ecdsa.PrivateKey: + r, s, err := ecdsa.Sign(rand.Reader, key, digest) + if err != nil { + return nil, err + } + rb, sb := r.Bytes(), s.Bytes() + size := key.Params().BitSize / 8 + if size%8 > 0 { + size++ + } + sig := make([]byte, size*2) + copy(sig[size-len(rb):], rb) + copy(sig[size*2-len(sb):], sb) + return sig, nil + } + return nil, ErrUnsupportedKey +} + +// jwsHasher indicates suitable JWS algorithm name and a hash function +// to use for signing a digest with the provided key. +// It returns ("", 0) if the key is not supported. +func jwsHasher(key crypto.Signer) (string, crypto.Hash) { + switch key := key.(type) { + case *rsa.PrivateKey: + return "RS256", crypto.SHA256 + case *ecdsa.PrivateKey: + switch key.Params().Name { + case "P-256": + return "ES256", crypto.SHA256 + case "P-384": + return "ES384", crypto.SHA384 + case "P-521": + return "ES512", crypto.SHA512 + } + } + return "", 0 +} + +// JWKThumbprint creates a JWK thumbprint out of pub +// as specified in https://tools.ietf.org/html/rfc7638. +func JWKThumbprint(pub crypto.PublicKey) (string, error) { + jwk, err := jwkEncode(pub) + if err != nil { + return "", err + } + b := sha256.Sum256([]byte(jwk)) + return base64.RawURLEncoding.EncodeToString(b[:]), nil +} diff --git a/vendor/golang.org/x/crypto/acme/types.go b/vendor/golang.org/x/crypto/acme/types.go new file mode 100644 index 00000000..54792c06 --- /dev/null +++ b/vendor/golang.org/x/crypto/acme/types.go @@ -0,0 +1,329 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package acme + +import ( + "crypto" + "crypto/x509" + "errors" + "fmt" + "net/http" + "strings" + "time" +) + +// ACME server response statuses used to describe Authorization and Challenge states. +const ( + StatusUnknown = "unknown" + StatusPending = "pending" + StatusProcessing = "processing" + StatusValid = "valid" + StatusInvalid = "invalid" + StatusRevoked = "revoked" +) + +// CRLReasonCode identifies the reason for a certificate revocation. +type CRLReasonCode int + +// CRL reason codes as defined in RFC 5280. 
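Two spot checks of the encoding above. First, the RSA "e" field: 65537 is 0x010001, so jwkEncode's base64url of its bytes is the familiar "AQAB". Second, JWKThumbprint in use; in ACME, a challenge's key authorization is the token joined to this thumbprint with a dot (the token below is a placeholder):

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"encoding/base64"
	"fmt"
	"math/big"

	"golang.org/x/crypto/acme"
)

func main() {
	// The "e" member for the common RSA exponent.
	e := big.NewInt(65537)
	fmt.Println(base64.RawURLEncoding.EncodeToString(e.Bytes())) // AQAB

	// Thumbprint of a fresh account key.
	key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	tp, err := acme.JWKThumbprint(key.Public())
	if err != nil {
		panic(err)
	}
	fmt.Println("token-from-challenge." + tp) // shape of a key authorization
}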
+const ( + CRLReasonUnspecified CRLReasonCode = 0 + CRLReasonKeyCompromise CRLReasonCode = 1 + CRLReasonCACompromise CRLReasonCode = 2 + CRLReasonAffiliationChanged CRLReasonCode = 3 + CRLReasonSuperseded CRLReasonCode = 4 + CRLReasonCessationOfOperation CRLReasonCode = 5 + CRLReasonCertificateHold CRLReasonCode = 6 + CRLReasonRemoveFromCRL CRLReasonCode = 8 + CRLReasonPrivilegeWithdrawn CRLReasonCode = 9 + CRLReasonAACompromise CRLReasonCode = 10 +) + +// ErrUnsupportedKey is returned when an unsupported key type is encountered. +var ErrUnsupportedKey = errors.New("acme: unknown key type; only RSA and ECDSA are supported") + +// Error is an ACME error, defined in Problem Details for HTTP APIs doc +// http://tools.ietf.org/html/draft-ietf-appsawg-http-problem. +type Error struct { + // StatusCode is The HTTP status code generated by the origin server. + StatusCode int + // ProblemType is a URI reference that identifies the problem type, + // typically in a "urn:acme:error:xxx" form. + ProblemType string + // Detail is a human-readable explanation specific to this occurrence of the problem. + Detail string + // Header is the original server error response headers. + // It may be nil. + Header http.Header +} + +func (e *Error) Error() string { + return fmt.Sprintf("%d %s: %s", e.StatusCode, e.ProblemType, e.Detail) +} + +// AuthorizationError indicates that an authorization for an identifier +// did not succeed. +// It contains all errors from Challenge items of the failed Authorization. +type AuthorizationError struct { + // URI uniquely identifies the failed Authorization. + URI string + + // Identifier is an AuthzID.Value of the failed Authorization. + Identifier string + + // Errors is a collection of non-nil error values of Challenge items + // of the failed Authorization. + Errors []error +} + +func (a *AuthorizationError) Error() string { + e := make([]string, len(a.Errors)) + for i, err := range a.Errors { + e[i] = err.Error() + } + return fmt.Sprintf("acme: authorization error for %s: %s", a.Identifier, strings.Join(e, "; ")) +} + +// RateLimit reports whether err represents a rate limit error and +// any Retry-After duration returned by the server. +// +// See the following for more details on rate limiting: +// https://tools.ietf.org/html/draft-ietf-acme-acme-05#section-5.6 +func RateLimit(err error) (time.Duration, bool) { + e, ok := err.(*Error) + if !ok { + return 0, false + } + // Some CA implementations may return incorrect values. + // Use case-insensitive comparison. + if !strings.HasSuffix(strings.ToLower(e.ProblemType), ":ratelimited") { + return 0, false + } + if e.Header == nil { + return 0, true + } + return retryAfter(e.Header.Get("Retry-After")), true +} + +// Account is a user account. It is associated with a private key. +type Account struct { + // URI is the account unique ID, which is also a URL used to retrieve + // account data from the CA. + URI string + + // Contact is a slice of contact info used during registration. + Contact []string + + // The terms user has agreed to. + // A value not matching CurrentTerms indicates that the user hasn't agreed + // to the actual Terms of Service of the CA. + AgreedTerms string + + // Actual terms of a CA. + CurrentTerms string + + // Authz is the authorization URL used to initiate a new authz flow. + Authz string + + // Authorizations is a URI from which a list of authorizations + // granted to this account can be fetched via a GET request. 
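RateLimit above is how callers honor a CA's Retry-After on rate-limit errors. A sketch of the calling pattern (issueCert is a hypothetical operation that can fail with *acme.Error):

package main

import (
	"log"
	"time"

	"golang.org/x/crypto/acme"
)

func issueCert() error { return nil } // placeholder for real ACME work

func main() {
	for {
		err := issueCert()
		if err == nil {
			return
		}
		if d, ok := acme.RateLimit(err); ok {
			log.Printf("rate limited; retrying in %v", d)
			time.Sleep(d)
			continue
		}
		log.Fatal(err) // some other failure
	}
}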
+ Authorizations string + + // Certificates is a URI from which a list of certificates + // issued for this account can be fetched via a GET request. + Certificates string +} + +// Directory is ACME server discovery data. +type Directory struct { + // RegURL is an account endpoint URL, allowing for creating new + // and modifying existing accounts. + RegURL string + + // AuthzURL is used to initiate Identifier Authorization flow. + AuthzURL string + + // CertURL is a new certificate issuance endpoint URL. + CertURL string + + // RevokeURL is used to initiate a certificate revocation flow. + RevokeURL string + + // Term is a URI identifying the current terms of service. + Terms string + + // Website is an HTTP or HTTPS URL locating a website + // providing more information about the ACME server. + Website string + + // CAA consists of lowercase hostname elements, which the ACME server + // recognises as referring to itself for the purposes of CAA record validation + // as defined in RFC6844. + CAA []string +} + +// Challenge encodes a returned CA challenge. +// Its Error field may be non-nil if the challenge is part of an Authorization +// with StatusInvalid. +type Challenge struct { + // Type is the challenge type, e.g. "http-01", "tls-sni-02", "dns-01". + Type string + + // URI is where a challenge response can be posted to. + URI string + + // Token is a random value that uniquely identifies the challenge. + Token string + + // Status identifies the status of this challenge. + Status string + + // Error indicates the reason for an authorization failure + // when this challenge was used. + // The type of a non-nil value is *Error. + Error error +} + +// Authorization encodes an authorization response. +type Authorization struct { + // URI uniquely identifies a authorization. + URI string + + // Status identifies the status of an authorization. + Status string + + // Identifier is what the account is authorized to represent. + Identifier AuthzID + + // Challenges that the client needs to fulfill in order to prove possession + // of the identifier (for pending authorizations). + // For final authorizations, the challenges that were used. + Challenges []*Challenge + + // A collection of sets of challenges, each of which would be sufficient + // to prove possession of the identifier. + // Clients must complete a set of challenges that covers at least one set. + // Challenges are identified by their indices in the challenges array. + // If this field is empty, the client needs to complete all challenges. + Combinations [][]int +} + +// AuthzID is an identifier that an account is authorized to represent. +type AuthzID struct { + Type string // The type of identifier, e.g. "dns". + Value string // The identifier itself, e.g. "example.org". +} + +// wireAuthz is ACME JSON representation of Authorization objects. 
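Given an Authorization as defined above, a client scans Challenges for a type it knows how to answer. A minimal sketch of that selection:

package main

import (
	"fmt"

	"golang.org/x/crypto/acme"
)

// pickChallenge returns the first challenge of the wanted type, or nil.
func pickChallenge(typ string, authz *acme.Authorization) *acme.Challenge {
	for _, c := range authz.Challenges {
		if c.Type == typ {
			return c
		}
	}
	return nil
}

func main() {
	authz := &acme.Authorization{
		Challenges: []*acme.Challenge{
			{Type: "dns-01", Token: "x"},
			{Type: "http-01", Token: "abc"},
		},
	}
	if c := pickChallenge("http-01", authz); c != nil {
		fmt.Println("answer token:", c.Token)
	}
}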
+type wireAuthz struct { + Status string + Challenges []wireChallenge + Combinations [][]int + Identifier struct { + Type string + Value string + } +} + +func (z *wireAuthz) authorization(uri string) *Authorization { + a := &Authorization{ + URI: uri, + Status: z.Status, + Identifier: AuthzID{Type: z.Identifier.Type, Value: z.Identifier.Value}, + Combinations: z.Combinations, // shallow copy + Challenges: make([]*Challenge, len(z.Challenges)), + } + for i, v := range z.Challenges { + a.Challenges[i] = v.challenge() + } + return a +} + +func (z *wireAuthz) error(uri string) *AuthorizationError { + err := &AuthorizationError{ + URI: uri, + Identifier: z.Identifier.Value, + } + for _, raw := range z.Challenges { + if raw.Error != nil { + err.Errors = append(err.Errors, raw.Error.error(nil)) + } + } + return err +} + +// wireChallenge is ACME JSON challenge representation. +type wireChallenge struct { + URI string `json:"uri"` + Type string + Token string + Status string + Error *wireError +} + +func (c *wireChallenge) challenge() *Challenge { + v := &Challenge{ + URI: c.URI, + Type: c.Type, + Token: c.Token, + Status: c.Status, + } + if v.Status == "" { + v.Status = StatusPending + } + if c.Error != nil { + v.Error = c.Error.error(nil) + } + return v +} + +// wireError is a subset of fields of the Problem Details object +// as described in https://tools.ietf.org/html/rfc7807#section-3.1. +type wireError struct { + Status int + Type string + Detail string +} + +func (e *wireError) error(h http.Header) *Error { + return &Error{ + StatusCode: e.Status, + ProblemType: e.Type, + Detail: e.Detail, + Header: h, + } +} + +// CertOption is an optional argument type for the TLS ChallengeCert methods for +// customizing a temporary certificate for TLS-based challenges. +type CertOption interface { + privateCertOpt() +} + +// WithKey creates an option holding a private/public key pair. +// The private part signs a certificate, and the public part represents the signee. +func WithKey(key crypto.Signer) CertOption { + return &certOptKey{key} +} + +type certOptKey struct { + key crypto.Signer +} + +func (*certOptKey) privateCertOpt() {} + +// WithTemplate creates an option for specifying a certificate template. +// See x509.CreateCertificate for template usage details. +// +// In TLS ChallengeCert methods, the template is also used as parent, +// resulting in a self-signed certificate. +// The DNSNames field of t is always overwritten for tls-sni challenge certs. +func WithTemplate(t *x509.Certificate) CertOption { + return (*certOptTemplate)(t) +} + +type certOptTemplate x509.Certificate + +func (*certOptTemplate) privateCertOpt() {} diff --git a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go index f8b807f9..aeb73f81 100644 --- a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go +++ b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go @@ -12,9 +12,10 @@ import ( "crypto/subtle" "errors" "fmt" - "golang.org/x/crypto/blowfish" "io" "strconv" + + "golang.org/x/crypto/blowfish" ) const ( @@ -205,7 +206,6 @@ func bcrypt(password []byte, cost int, salt []byte) ([]byte, error) { } func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) { - csalt, err := base64Decode(salt) if err != nil { return nil, err @@ -213,7 +213,8 @@ func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cip // Bug compatibility with C bcrypt implementations. They use the trailing // NULL in the key string during expansion. 
- ckey := append(key, 0) + // We copy the key to prevent changing the underlying array. + ckey := append(key[:len(key):len(key)], 0) c, err := blowfish.NewSaltedCipher(ckey, csalt) if err != nil { @@ -240,11 +241,11 @@ func (p *hashed) Hash() []byte { n = 3 } arr[n] = '$' - n += 1 + n++ copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost))) n += 2 arr[n] = '$' - n += 1 + n++ copy(arr[n:], p.salt) n += encodedSaltSize copy(arr[n:], p.hash) diff --git a/vendor/golang.org/x/crypto/blowfish/cipher.go b/vendor/golang.org/x/crypto/blowfish/cipher.go index 542984aa..2641dadd 100644 --- a/vendor/golang.org/x/crypto/blowfish/cipher.go +++ b/vendor/golang.org/x/crypto/blowfish/cipher.go @@ -6,7 +6,7 @@ package blowfish // import "golang.org/x/crypto/blowfish" // The code is a port of Bruce Schneier's C implementation. -// See http://www.schneier.com/blowfish.html. +// See https://www.schneier.com/blowfish.html. import "strconv" @@ -39,7 +39,7 @@ func NewCipher(key []byte) (*Cipher, error) { // NewSaltedCipher creates a returns a Cipher that folds a salt into its key // schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is -// sufficient and desirable. For bcrypt compatiblity, the key can be over 56 +// sufficient and desirable. For bcrypt compatibility, the key can be over 56 // bytes. func NewSaltedCipher(key, salt []byte) (*Cipher, error) { if len(salt) == 0 { diff --git a/vendor/golang.org/x/crypto/blowfish/const.go b/vendor/golang.org/x/crypto/blowfish/const.go index 8c5ee4cb..d0407759 100644 --- a/vendor/golang.org/x/crypto/blowfish/const.go +++ b/vendor/golang.org/x/crypto/blowfish/const.go @@ -4,7 +4,7 @@ // The startup permutation array and substitution boxes. // They are the hexadecimal digits of PI; see: -// http://www.schneier.com/code/constants.txt. +// https://www.schneier.com/code/constants.txt. package blowfish diff --git a/vendor/golang.org/x/crypto/curve25519/const_amd64.h b/vendor/golang.org/x/crypto/curve25519/const_amd64.h new file mode 100644 index 00000000..b3f74162 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/const_amd64.h @@ -0,0 +1,8 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +#define REDMASK51 0x0007FFFFFFFFFFFF diff --git a/vendor/golang.org/x/crypto/curve25519/const_amd64.s b/vendor/golang.org/x/crypto/curve25519/const_amd64.s new file mode 100644 index 00000000..ee7b4bd5 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/const_amd64.s @@ -0,0 +1,20 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +// These constants cannot be encoded in non-MOVQ immediates. +// We access them directly from memory instead. 
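The bcrypt fix above uses a full slice expression, key[:len(key):len(key)], to cap capacity so that the subsequent append must allocate instead of writing its NUL byte into the caller's backing array. A standalone demonstration of the aliasing it prevents:

package main

import "fmt"

func main() {
	backing := []byte("secret!") // pretend this is the caller's buffer
	key := backing[:6]           // len 6, cap 7

	bad := append(key, 0)               // fits in cap: writes backing[6]
	fmt.Printf("%q %q\n", bad, backing) // "secret\x00" "secret\x00"

	backing = []byte("secret!")
	key = backing[:6:6]                  // len 6, cap 6
	good := append(key, 0)               // must copy: backing untouched
	fmt.Printf("%q %q\n", good, backing) // "secret\x00" "secret!"
}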
+ +DATA ·_121666_213(SB)/8, $996687872 +GLOBL ·_121666_213(SB), 8, $8 + +DATA ·_2P0(SB)/8, $0xFFFFFFFFFFFDA +GLOBL ·_2P0(SB), 8, $8 + +DATA ·_2P1234(SB)/8, $0xFFFFFFFFFFFFE +GLOBL ·_2P1234(SB), 8, $8 diff --git a/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s b/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s new file mode 100644 index 00000000..cd793a5b --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s @@ -0,0 +1,65 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!gccgo,!appengine + +// func cswap(inout *[4][5]uint64, v uint64) +TEXT ·cswap(SB),7,$0 + MOVQ inout+0(FP),DI + MOVQ v+8(FP),SI + + SUBQ $1, SI + NOTQ SI + MOVQ SI, X15 + PSHUFD $0x44, X15, X15 + + MOVOU 0(DI), X0 + MOVOU 16(DI), X2 + MOVOU 32(DI), X4 + MOVOU 48(DI), X6 + MOVOU 64(DI), X8 + MOVOU 80(DI), X1 + MOVOU 96(DI), X3 + MOVOU 112(DI), X5 + MOVOU 128(DI), X7 + MOVOU 144(DI), X9 + + MOVO X1, X10 + MOVO X3, X11 + MOVO X5, X12 + MOVO X7, X13 + MOVO X9, X14 + + PXOR X0, X10 + PXOR X2, X11 + PXOR X4, X12 + PXOR X6, X13 + PXOR X8, X14 + PAND X15, X10 + PAND X15, X11 + PAND X15, X12 + PAND X15, X13 + PAND X15, X14 + PXOR X10, X0 + PXOR X10, X1 + PXOR X11, X2 + PXOR X11, X3 + PXOR X12, X4 + PXOR X12, X5 + PXOR X13, X6 + PXOR X13, X7 + PXOR X14, X8 + PXOR X14, X9 + + MOVOU X0, 0(DI) + MOVOU X2, 16(DI) + MOVOU X4, 32(DI) + MOVOU X6, 48(DI) + MOVOU X8, 64(DI) + MOVOU X1, 80(DI) + MOVOU X3, 96(DI) + MOVOU X5, 112(DI) + MOVOU X7, 128(DI) + MOVOU X9, 144(DI) + RET diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519.go b/vendor/golang.org/x/crypto/curve25519/curve25519.go new file mode 100644 index 00000000..cb8fbc57 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/curve25519.go @@ -0,0 +1,834 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// We have an implementation in amd64 assembly so this code is only run on +// non-amd64 platforms. The amd64 assembly does not support gccgo. +// +build !amd64 gccgo appengine + +package curve25519 + +import ( + "encoding/binary" +) + +// This code is a port of the public domain, "ref10" implementation of +// curve25519 from SUPERCOP 20130419 by D. J. Bernstein. + +// fieldElement represents an element of the field GF(2^255 - 19). An element +// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 +// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on +// context. +type fieldElement [10]int32 + +func feZero(fe *fieldElement) { + for i := range fe { + fe[i] = 0 + } +} + +func feOne(fe *fieldElement) { + feZero(fe) + fe[0] = 1 +} + +func feAdd(dst, a, b *fieldElement) { + for i := range dst { + dst[i] = a[i] + b[i] + } +} + +func feSub(dst, a, b *fieldElement) { + for i := range dst { + dst[i] = a[i] - b[i] + } +} + +func feCopy(dst, src *fieldElement) { + for i := range dst { + dst[i] = src[i] + } +} + +// feCSwap replaces (f,g) with (g,f) if b == 1; replaces (f,g) with (f,g) if b == 0. +// +// Preconditions: b in {0,1}. +func feCSwap(f, g *fieldElement, b int32) { + b = -b + for i := range f { + t := b & (f[i] ^ g[i]) + f[i] ^= t + g[i] ^= t + } +} + +// load3 reads a 24-bit, little-endian value from in. 
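feCSwap above and the cswap assembly are branch-free conditional swaps: the bit b is stretched into an all-ones or all-zero mask and XOR-combined, so secret data never selects a branch. The same idiom on plain integers:

package main

import "fmt"

// cswap swaps *x and *y when b == 1 and is a no-op when b == 0,
// without branching on b.
func cswap(x, y *int32, b int32) {
	mask := -b            // 0 or all ones
	t := mask & (*x ^ *y) // 0 or *x^*y
	*x ^= t
	*y ^= t
}

func main() {
	a, b := int32(7), int32(9)
	cswap(&a, &b, 1)
	fmt.Println(a, b) // 9 7
	cswap(&a, &b, 0)
	fmt.Println(a, b) // 9 7
}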
+func load3(in []byte) int64 {
+	var r int64
+	r = int64(in[0])
+	r |= int64(in[1]) << 8
+	r |= int64(in[2]) << 16
+	return r
+}
+
+// load4 reads a 32-bit, little-endian value from in.
+func load4(in []byte) int64 {
+	return int64(binary.LittleEndian.Uint32(in))
+}
+
+func feFromBytes(dst *fieldElement, src *[32]byte) {
+	h0 := load4(src[:])
+	h1 := load3(src[4:]) << 6
+	h2 := load3(src[7:]) << 5
+	h3 := load3(src[10:]) << 3
+	h4 := load3(src[13:]) << 2
+	h5 := load4(src[16:])
+	h6 := load3(src[20:]) << 7
+	h7 := load3(src[23:]) << 5
+	h8 := load3(src[26:]) << 4
+	h9 := load3(src[29:]) << 2
+
+	var carry [10]int64
+	carry[9] = (h9 + 1<<24) >> 25
+	h0 += carry[9] * 19
+	h9 -= carry[9] << 25
+	carry[1] = (h1 + 1<<24) >> 25
+	h2 += carry[1]
+	h1 -= carry[1] << 25
+	carry[3] = (h3 + 1<<24) >> 25
+	h4 += carry[3]
+	h3 -= carry[3] << 25
+	carry[5] = (h5 + 1<<24) >> 25
+	h6 += carry[5]
+	h5 -= carry[5] << 25
+	carry[7] = (h7 + 1<<24) >> 25
+	h8 += carry[7]
+	h7 -= carry[7] << 25
+
+	carry[0] = (h0 + 1<<25) >> 26
+	h1 += carry[0]
+	h0 -= carry[0] << 26
+	carry[2] = (h2 + 1<<25) >> 26
+	h3 += carry[2]
+	h2 -= carry[2] << 26
+	carry[4] = (h4 + 1<<25) >> 26
+	h5 += carry[4]
+	h4 -= carry[4] << 26
+	carry[6] = (h6 + 1<<25) >> 26
+	h7 += carry[6]
+	h6 -= carry[6] << 26
+	carry[8] = (h8 + 1<<25) >> 26
+	h9 += carry[8]
+	h8 -= carry[8] << 26
+
+	dst[0] = int32(h0)
+	dst[1] = int32(h1)
+	dst[2] = int32(h2)
+	dst[3] = int32(h3)
+	dst[4] = int32(h4)
+	dst[5] = int32(h5)
+	dst[6] = int32(h6)
+	dst[7] = int32(h7)
+	dst[8] = int32(h8)
+	dst[9] = int32(h9)
+}
+
+// feToBytes marshals h to s.
+// Preconditions:
+//   |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+//
+// Write p=2^255-19; q=floor(h/p).
+// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))).
+//
+// Proof:
+//   Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4.
+//   Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4.
+//
+//   Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9).
+//   Then 0<y<1.
+//
+//   Write r=h-pq.
+//   Have 0<=r<=p-1=2^255-20.
+//   Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1.
+//
+//   Write x=r+19(2^-255)r+y.
+//   Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q.
+//
+//   Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1))
+//   so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q.
+func feToBytes(s *[32]byte, h *fieldElement) {
+	var carry [10]int32
+
+	q := (19*h[9] + (1 << 24)) >> 25
+	q = (h[0] + q) >> 26
+	q = (h[1] + q) >> 25
+	q = (h[2] + q) >> 26
+	q = (h[3] + q) >> 25
+	q = (h[4] + q) >> 26
+	q = (h[5] + q) >> 25
+	q = (h[6] + q) >> 26
+	q = (h[7] + q) >> 25
+	q = (h[8] + q) >> 26
+	q = (h[9] + q) >> 25
+
+	// Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20.
+	h[0] += 19 * q
+	// Goal: Output h-2^255 q, which is between 0 and 2^255-20.
+
+	carry[0] = h[0] >> 26
+	h[1] += carry[0]
+	h[0] -= carry[0] << 26
+	carry[1] = h[1] >> 25
+	h[2] += carry[1]
+	h[1] -= carry[1] << 25
+	carry[2] = h[2] >> 26
+	h[3] += carry[2]
+	h[2] -= carry[2] << 26
+	carry[3] = h[3] >> 25
+	h[4] += carry[3]
+	h[3] -= carry[3] << 25
+	carry[4] = h[4] >> 26
+	h[5] += carry[4]
+	h[4] -= carry[4] << 26
+	carry[5] = h[5] >> 25
+	h[6] += carry[5]
+	h[5] -= carry[5] << 25
+	carry[6] = h[6] >> 26
+	h[7] += carry[6]
+	h[6] -= carry[6] << 26
+	carry[7] = h[7] >> 25
+	h[8] += carry[7]
+	h[7] -= carry[7] << 25
+	carry[8] = h[8] >> 26
+	h[9] += carry[8]
+	h[8] -= carry[8] << 26
+	carry[9] = h[9] >> 25
+	h[9] -= carry[9] << 25
+	// h10 = carry9
+
+	// Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20.
+	// Have h[0]+...+2^230 h[9] between 0 and 2^255-1;
+	// evidently 2^255 h10-2^255 q = 0.
+	// Goal: Output h[0]+...+2^230 h[9].
+ + s[0] = byte(h[0] >> 0) + s[1] = byte(h[0] >> 8) + s[2] = byte(h[0] >> 16) + s[3] = byte((h[0] >> 24) | (h[1] << 2)) + s[4] = byte(h[1] >> 6) + s[5] = byte(h[1] >> 14) + s[6] = byte((h[1] >> 22) | (h[2] << 3)) + s[7] = byte(h[2] >> 5) + s[8] = byte(h[2] >> 13) + s[9] = byte((h[2] >> 21) | (h[3] << 5)) + s[10] = byte(h[3] >> 3) + s[11] = byte(h[3] >> 11) + s[12] = byte((h[3] >> 19) | (h[4] << 6)) + s[13] = byte(h[4] >> 2) + s[14] = byte(h[4] >> 10) + s[15] = byte(h[4] >> 18) + s[16] = byte(h[5] >> 0) + s[17] = byte(h[5] >> 8) + s[18] = byte(h[5] >> 16) + s[19] = byte((h[5] >> 24) | (h[6] << 1)) + s[20] = byte(h[6] >> 7) + s[21] = byte(h[6] >> 15) + s[22] = byte((h[6] >> 23) | (h[7] << 3)) + s[23] = byte(h[7] >> 5) + s[24] = byte(h[7] >> 13) + s[25] = byte((h[7] >> 21) | (h[8] << 4)) + s[26] = byte(h[8] >> 4) + s[27] = byte(h[8] >> 12) + s[28] = byte((h[8] >> 20) | (h[9] << 6)) + s[29] = byte(h[9] >> 2) + s[30] = byte(h[9] >> 10) + s[31] = byte(h[9] >> 18) +} + +// feMul calculates h = f * g +// Can overlap h with f or g. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Notes on implementation strategy: +// +// Using schoolbook multiplication. +// Karatsuba would save a little in some cost models. +// +// Most multiplications by 2 and 19 are 32-bit precomputations; +// cheaper than 64-bit postcomputations. +// +// There is one remaining multiplication by 19 in the carry chain; +// one *19 precomputation can be merged into this, +// but the resulting data flow is considerably less clean. +// +// There are 12 carries below. +// 10 of them are 2-way parallelizable and vectorizable. +// Can get away with 11 carries, but then data flow is much deeper. +// +// With tighter constraints on inputs can squeeze carries into int32. 
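Every carry step in feMul below (and in feToBytes above) has the shape carry = (h + 2^(k-1)) >> k; h -= carry << k, which rounds h to the nearest multiple of 2^k and passes the excess up one limb. A tiny check that the remaining limb lands within ±2^25 for k = 26:

package main

import "fmt"

func main() {
	h0 := int64(123456789012) // an oversized limb
	carry := (h0 + (1 << 25)) >> 26
	h0 -= carry << 26
	// h0 is now the signed remainder after rounding to a multiple of 2^26.
	fmt.Println(carry, h0, h0 >= -(1<<25) && h0 < 1<<25) // ..., ..., true
}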
+func feMul(h, f, g *fieldElement) { + f0 := f[0] + f1 := f[1] + f2 := f[2] + f3 := f[3] + f4 := f[4] + f5 := f[5] + f6 := f[6] + f7 := f[7] + f8 := f[8] + f9 := f[9] + g0 := g[0] + g1 := g[1] + g2 := g[2] + g3 := g[3] + g4 := g[4] + g5 := g[5] + g6 := g[6] + g7 := g[7] + g8 := g[8] + g9 := g[9] + g1_19 := 19 * g1 // 1.4*2^29 + g2_19 := 19 * g2 // 1.4*2^30; still ok + g3_19 := 19 * g3 + g4_19 := 19 * g4 + g5_19 := 19 * g5 + g6_19 := 19 * g6 + g7_19 := 19 * g7 + g8_19 := 19 * g8 + g9_19 := 19 * g9 + f1_2 := 2 * f1 + f3_2 := 2 * f3 + f5_2 := 2 * f5 + f7_2 := 2 * f7 + f9_2 := 2 * f9 + f0g0 := int64(f0) * int64(g0) + f0g1 := int64(f0) * int64(g1) + f0g2 := int64(f0) * int64(g2) + f0g3 := int64(f0) * int64(g3) + f0g4 := int64(f0) * int64(g4) + f0g5 := int64(f0) * int64(g5) + f0g6 := int64(f0) * int64(g6) + f0g7 := int64(f0) * int64(g7) + f0g8 := int64(f0) * int64(g8) + f0g9 := int64(f0) * int64(g9) + f1g0 := int64(f1) * int64(g0) + f1g1_2 := int64(f1_2) * int64(g1) + f1g2 := int64(f1) * int64(g2) + f1g3_2 := int64(f1_2) * int64(g3) + f1g4 := int64(f1) * int64(g4) + f1g5_2 := int64(f1_2) * int64(g5) + f1g6 := int64(f1) * int64(g6) + f1g7_2 := int64(f1_2) * int64(g7) + f1g8 := int64(f1) * int64(g8) + f1g9_38 := int64(f1_2) * int64(g9_19) + f2g0 := int64(f2) * int64(g0) + f2g1 := int64(f2) * int64(g1) + f2g2 := int64(f2) * int64(g2) + f2g3 := int64(f2) * int64(g3) + f2g4 := int64(f2) * int64(g4) + f2g5 := int64(f2) * int64(g5) + f2g6 := int64(f2) * int64(g6) + f2g7 := int64(f2) * int64(g7) + f2g8_19 := int64(f2) * int64(g8_19) + f2g9_19 := int64(f2) * int64(g9_19) + f3g0 := int64(f3) * int64(g0) + f3g1_2 := int64(f3_2) * int64(g1) + f3g2 := int64(f3) * int64(g2) + f3g3_2 := int64(f3_2) * int64(g3) + f3g4 := int64(f3) * int64(g4) + f3g5_2 := int64(f3_2) * int64(g5) + f3g6 := int64(f3) * int64(g6) + f3g7_38 := int64(f3_2) * int64(g7_19) + f3g8_19 := int64(f3) * int64(g8_19) + f3g9_38 := int64(f3_2) * int64(g9_19) + f4g0 := int64(f4) * int64(g0) + f4g1 := int64(f4) * int64(g1) + f4g2 := int64(f4) * int64(g2) + f4g3 := int64(f4) * int64(g3) + f4g4 := int64(f4) * int64(g4) + f4g5 := int64(f4) * int64(g5) + f4g6_19 := int64(f4) * int64(g6_19) + f4g7_19 := int64(f4) * int64(g7_19) + f4g8_19 := int64(f4) * int64(g8_19) + f4g9_19 := int64(f4) * int64(g9_19) + f5g0 := int64(f5) * int64(g0) + f5g1_2 := int64(f5_2) * int64(g1) + f5g2 := int64(f5) * int64(g2) + f5g3_2 := int64(f5_2) * int64(g3) + f5g4 := int64(f5) * int64(g4) + f5g5_38 := int64(f5_2) * int64(g5_19) + f5g6_19 := int64(f5) * int64(g6_19) + f5g7_38 := int64(f5_2) * int64(g7_19) + f5g8_19 := int64(f5) * int64(g8_19) + f5g9_38 := int64(f5_2) * int64(g9_19) + f6g0 := int64(f6) * int64(g0) + f6g1 := int64(f6) * int64(g1) + f6g2 := int64(f6) * int64(g2) + f6g3 := int64(f6) * int64(g3) + f6g4_19 := int64(f6) * int64(g4_19) + f6g5_19 := int64(f6) * int64(g5_19) + f6g6_19 := int64(f6) * int64(g6_19) + f6g7_19 := int64(f6) * int64(g7_19) + f6g8_19 := int64(f6) * int64(g8_19) + f6g9_19 := int64(f6) * int64(g9_19) + f7g0 := int64(f7) * int64(g0) + f7g1_2 := int64(f7_2) * int64(g1) + f7g2 := int64(f7) * int64(g2) + f7g3_38 := int64(f7_2) * int64(g3_19) + f7g4_19 := int64(f7) * int64(g4_19) + f7g5_38 := int64(f7_2) * int64(g5_19) + f7g6_19 := int64(f7) * int64(g6_19) + f7g7_38 := int64(f7_2) * int64(g7_19) + f7g8_19 := int64(f7) * int64(g8_19) + f7g9_38 := int64(f7_2) * int64(g9_19) + f8g0 := int64(f8) * int64(g0) + f8g1 := int64(f8) * int64(g1) + f8g2_19 := int64(f8) * int64(g2_19) + f8g3_19 := int64(f8) * int64(g3_19) + f8g4_19 := int64(f8) * int64(g4_19) + 
f8g5_19 := int64(f8) * int64(g5_19) + f8g6_19 := int64(f8) * int64(g6_19) + f8g7_19 := int64(f8) * int64(g7_19) + f8g8_19 := int64(f8) * int64(g8_19) + f8g9_19 := int64(f8) * int64(g9_19) + f9g0 := int64(f9) * int64(g0) + f9g1_38 := int64(f9_2) * int64(g1_19) + f9g2_19 := int64(f9) * int64(g2_19) + f9g3_38 := int64(f9_2) * int64(g3_19) + f9g4_19 := int64(f9) * int64(g4_19) + f9g5_38 := int64(f9_2) * int64(g5_19) + f9g6_19 := int64(f9) * int64(g6_19) + f9g7_38 := int64(f9_2) * int64(g7_19) + f9g8_19 := int64(f9) * int64(g8_19) + f9g9_38 := int64(f9_2) * int64(g9_19) + h0 := f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 + f6g4_19 + f7g3_38 + f8g2_19 + f9g1_38 + h1 := f0g1 + f1g0 + f2g9_19 + f3g8_19 + f4g7_19 + f5g6_19 + f6g5_19 + f7g4_19 + f8g3_19 + f9g2_19 + h2 := f0g2 + f1g1_2 + f2g0 + f3g9_38 + f4g8_19 + f5g7_38 + f6g6_19 + f7g5_38 + f8g4_19 + f9g3_38 + h3 := f0g3 + f1g2 + f2g1 + f3g0 + f4g9_19 + f5g8_19 + f6g7_19 + f7g6_19 + f8g5_19 + f9g4_19 + h4 := f0g4 + f1g3_2 + f2g2 + f3g1_2 + f4g0 + f5g9_38 + f6g8_19 + f7g7_38 + f8g6_19 + f9g5_38 + h5 := f0g5 + f1g4 + f2g3 + f3g2 + f4g1 + f5g0 + f6g9_19 + f7g8_19 + f8g7_19 + f9g6_19 + h6 := f0g6 + f1g5_2 + f2g4 + f3g3_2 + f4g2 + f5g1_2 + f6g0 + f7g9_38 + f8g8_19 + f9g7_38 + h7 := f0g7 + f1g6 + f2g5 + f3g4 + f4g3 + f5g2 + f6g1 + f7g0 + f8g9_19 + f9g8_19 + h8 := f0g8 + f1g7_2 + f2g6 + f3g5_2 + f4g4 + f5g3_2 + f6g2 + f7g1_2 + f8g0 + f9g9_38 + h9 := f0g9 + f1g8 + f2g7 + f3g6 + f4g5 + f5g4 + f6g3 + f7g2 + f8g1 + f9g0 + var carry [10]int64 + + // |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38)) + // i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8 + // |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19)) + // i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + // |h0| <= 2^25 + // |h4| <= 2^25 + // |h1| <= 1.51*2^58 + // |h5| <= 1.51*2^58 + + carry[1] = (h1 + (1 << 24)) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[5] = (h5 + (1 << 24)) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + // |h1| <= 2^24; from now on fits into int32 + // |h5| <= 2^24; from now on fits into int32 + // |h2| <= 1.21*2^59 + // |h6| <= 1.21*2^59 + + carry[2] = (h2 + (1 << 25)) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[6] = (h6 + (1 << 25)) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + // |h2| <= 2^25; from now on fits into int32 unchanged + // |h6| <= 2^25; from now on fits into int32 unchanged + // |h3| <= 1.51*2^58 + // |h7| <= 1.51*2^58 + + carry[3] = (h3 + (1 << 24)) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[7] = (h7 + (1 << 24)) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + // |h3| <= 2^24; from now on fits into int32 unchanged + // |h7| <= 2^24; from now on fits into int32 unchanged + // |h4| <= 1.52*2^33 + // |h8| <= 1.52*2^33 + + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[8] = (h8 + (1 << 25)) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + // |h4| <= 2^25; from now on fits into int32 unchanged + // |h8| <= 2^25; from now on fits into int32 unchanged + // |h5| <= 1.01*2^24 + // |h9| <= 1.51*2^58 + + carry[9] = (h9 + (1 << 24)) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + // |h9| <= 2^24; from now on fits into int32 unchanged + // |h0| <= 1.8*2^37 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + // |h0| <= 2^25; from now on fits into int32 unchanged + // |h1| <= 
1.01*2^24 + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// feSquare calculates h = f*f. Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +func feSquare(h, f *fieldElement) { + f0 := f[0] + f1 := f[1] + f2 := f[2] + f3 := f[3] + f4 := f[4] + f5 := f[5] + f6 := f[6] + f7 := f[7] + f8 := f[8] + f9 := f[9] + f0_2 := 2 * f0 + f1_2 := 2 * f1 + f2_2 := 2 * f2 + f3_2 := 2 * f3 + f4_2 := 2 * f4 + f5_2 := 2 * f5 + f6_2 := 2 * f6 + f7_2 := 2 * f7 + f5_38 := 38 * f5 // 1.31*2^30 + f6_19 := 19 * f6 // 1.31*2^30 + f7_38 := 38 * f7 // 1.31*2^30 + f8_19 := 19 * f8 // 1.31*2^30 + f9_38 := 38 * f9 // 1.31*2^30 + f0f0 := int64(f0) * int64(f0) + f0f1_2 := int64(f0_2) * int64(f1) + f0f2_2 := int64(f0_2) * int64(f2) + f0f3_2 := int64(f0_2) * int64(f3) + f0f4_2 := int64(f0_2) * int64(f4) + f0f5_2 := int64(f0_2) * int64(f5) + f0f6_2 := int64(f0_2) * int64(f6) + f0f7_2 := int64(f0_2) * int64(f7) + f0f8_2 := int64(f0_2) * int64(f8) + f0f9_2 := int64(f0_2) * int64(f9) + f1f1_2 := int64(f1_2) * int64(f1) + f1f2_2 := int64(f1_2) * int64(f2) + f1f3_4 := int64(f1_2) * int64(f3_2) + f1f4_2 := int64(f1_2) * int64(f4) + f1f5_4 := int64(f1_2) * int64(f5_2) + f1f6_2 := int64(f1_2) * int64(f6) + f1f7_4 := int64(f1_2) * int64(f7_2) + f1f8_2 := int64(f1_2) * int64(f8) + f1f9_76 := int64(f1_2) * int64(f9_38) + f2f2 := int64(f2) * int64(f2) + f2f3_2 := int64(f2_2) * int64(f3) + f2f4_2 := int64(f2_2) * int64(f4) + f2f5_2 := int64(f2_2) * int64(f5) + f2f6_2 := int64(f2_2) * int64(f6) + f2f7_2 := int64(f2_2) * int64(f7) + f2f8_38 := int64(f2_2) * int64(f8_19) + f2f9_38 := int64(f2) * int64(f9_38) + f3f3_2 := int64(f3_2) * int64(f3) + f3f4_2 := int64(f3_2) * int64(f4) + f3f5_4 := int64(f3_2) * int64(f5_2) + f3f6_2 := int64(f3_2) * int64(f6) + f3f7_76 := int64(f3_2) * int64(f7_38) + f3f8_38 := int64(f3_2) * int64(f8_19) + f3f9_76 := int64(f3_2) * int64(f9_38) + f4f4 := int64(f4) * int64(f4) + f4f5_2 := int64(f4_2) * int64(f5) + f4f6_38 := int64(f4_2) * int64(f6_19) + f4f7_38 := int64(f4) * int64(f7_38) + f4f8_38 := int64(f4_2) * int64(f8_19) + f4f9_38 := int64(f4) * int64(f9_38) + f5f5_38 := int64(f5) * int64(f5_38) + f5f6_38 := int64(f5_2) * int64(f6_19) + f5f7_76 := int64(f5_2) * int64(f7_38) + f5f8_38 := int64(f5_2) * int64(f8_19) + f5f9_76 := int64(f5_2) * int64(f9_38) + f6f6_19 := int64(f6) * int64(f6_19) + f6f7_38 := int64(f6) * int64(f7_38) + f6f8_38 := int64(f6_2) * int64(f8_19) + f6f9_38 := int64(f6) * int64(f9_38) + f7f7_38 := int64(f7) * int64(f7_38) + f7f8_38 := int64(f7_2) * int64(f8_19) + f7f9_76 := int64(f7_2) * int64(f9_38) + f8f8_19 := int64(f8) * int64(f8_19) + f8f9_38 := int64(f8) * int64(f9_38) + f9f9_38 := int64(f9) * int64(f9_38) + h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38 + h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38 + h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19 + h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38 + h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38 + h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38 + h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19 + h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38 + h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38 + h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2 + var carry [10]int64 + + carry[0] 
= (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + + carry[1] = (h1 + (1 << 24)) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[5] = (h5 + (1 << 24)) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + + carry[2] = (h2 + (1 << 25)) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[6] = (h6 + (1 << 25)) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + + carry[3] = (h3 + (1 << 24)) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[7] = (h7 + (1 << 24)) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[8] = (h8 + (1 << 25)) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + + carry[9] = (h9 + (1 << 24)) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// feMul121666 calculates h = f * 121666. Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +func feMul121666(h, f *fieldElement) { + h0 := int64(f[0]) * 121666 + h1 := int64(f[1]) * 121666 + h2 := int64(f[2]) * 121666 + h3 := int64(f[3]) * 121666 + h4 := int64(f[4]) * 121666 + h5 := int64(f[5]) * 121666 + h6 := int64(f[6]) * 121666 + h7 := int64(f[7]) * 121666 + h8 := int64(f[8]) * 121666 + h9 := int64(f[9]) * 121666 + var carry [10]int64 + + carry[9] = (h9 + (1 << 24)) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + carry[1] = (h1 + (1 << 24)) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[3] = (h3 + (1 << 24)) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[5] = (h5 + (1 << 24)) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + carry[7] = (h7 + (1 << 24)) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[2] = (h2 + (1 << 25)) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[6] = (h6 + (1 << 25)) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + carry[8] = (h8 + (1 << 25)) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// feInvert sets out = z^-1. 
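The carry chains in feMul, feSquare, and feMul121666 above all finish the same way: the carry out of the top limb is multiplied by 19 and folded back into h0. That is sound because the field prime is p = 2^255 - 19, so 2^255 ≡ 19 (mod p). A minimal standalone check of that identity with math/big (illustrative only, not part of the vendored file):

```
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// The curve25519 field prime: p = 2^255 - 19.
	one := big.NewInt(1)
	p := new(big.Int).Lsh(one, 255)
	p.Sub(p, big.NewInt(19))

	// The carry passes fold the overflow past 2^255 back into h0 with a
	// multiply by 19, relying on 2^255 = p + 19, i.e. 2^255 ≡ 19 (mod p).
	x := new(big.Int).Lsh(one, 255)
	fmt.Println(new(big.Int).Mod(x, p)) // prints 19
}
```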
+func feInvert(out, z *fieldElement) { + var t0, t1, t2, t3 fieldElement + var i int + + feSquare(&t0, z) + for i = 1; i < 1; i++ { + feSquare(&t0, &t0) + } + feSquare(&t1, &t0) + for i = 1; i < 2; i++ { + feSquare(&t1, &t1) + } + feMul(&t1, z, &t1) + feMul(&t0, &t0, &t1) + feSquare(&t2, &t0) + for i = 1; i < 1; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t1, &t2) + feSquare(&t2, &t1) + for i = 1; i < 5; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t2, &t1) + feSquare(&t2, &t1) + for i = 1; i < 10; i++ { + feSquare(&t2, &t2) + } + feMul(&t2, &t2, &t1) + feSquare(&t3, &t2) + for i = 1; i < 20; i++ { + feSquare(&t3, &t3) + } + feMul(&t2, &t3, &t2) + feSquare(&t2, &t2) + for i = 1; i < 10; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t2, &t1) + feSquare(&t2, &t1) + for i = 1; i < 50; i++ { + feSquare(&t2, &t2) + } + feMul(&t2, &t2, &t1) + feSquare(&t3, &t2) + for i = 1; i < 100; i++ { + feSquare(&t3, &t3) + } + feMul(&t2, &t3, &t2) + feSquare(&t2, &t2) + for i = 1; i < 50; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t2, &t1) + feSquare(&t1, &t1) + for i = 1; i < 5; i++ { + feSquare(&t1, &t1) + } + feMul(out, &t1, &t0) +} + +func scalarMult(out, in, base *[32]byte) { + var e [32]byte + + copy(e[:], in[:]) + e[0] &= 248 + e[31] &= 127 + e[31] |= 64 + + var x1, x2, z2, x3, z3, tmp0, tmp1 fieldElement + feFromBytes(&x1, base) + feOne(&x2) + feCopy(&x3, &x1) + feOne(&z3) + + swap := int32(0) + for pos := 254; pos >= 0; pos-- { + b := e[pos/8] >> uint(pos&7) + b &= 1 + swap ^= int32(b) + feCSwap(&x2, &x3, swap) + feCSwap(&z2, &z3, swap) + swap = int32(b) + + feSub(&tmp0, &x3, &z3) + feSub(&tmp1, &x2, &z2) + feAdd(&x2, &x2, &z2) + feAdd(&z2, &x3, &z3) + feMul(&z3, &tmp0, &x2) + feMul(&z2, &z2, &tmp1) + feSquare(&tmp0, &tmp1) + feSquare(&tmp1, &x2) + feAdd(&x3, &z3, &z2) + feSub(&z2, &z3, &z2) + feMul(&x2, &tmp1, &tmp0) + feSub(&tmp1, &tmp1, &tmp0) + feSquare(&z2, &z2) + feMul121666(&z3, &tmp1) + feSquare(&x3, &x3) + feAdd(&tmp0, &tmp0, &z3) + feMul(&z3, &x1, &z2) + feMul(&z2, &tmp1, &tmp0) + } + + feCSwap(&x2, &x3, swap) + feCSwap(&z2, &z3, swap) + + feInvert(&z2, &z2) + feMul(&x2, &x2, &z2) + feToBytes(out, &x2) +} diff --git a/vendor/golang.org/x/crypto/curve25519/doc.go b/vendor/golang.org/x/crypto/curve25519/doc.go new file mode 100644 index 00000000..da9b10d9 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/doc.go @@ -0,0 +1,23 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package curve25519 provides an implementation of scalar multiplication on +// the elliptic curve known as curve25519. See https://cr.yp.to/ecdh.html +package curve25519 // import "golang.org/x/crypto/curve25519" + +// basePoint is the x coordinate of the generator of the curve. +var basePoint = [32]byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + +// ScalarMult sets dst to the product in*base where dst and base are the x +// coordinates of group points and all values are in little-endian form. +func ScalarMult(dst, in, base *[32]byte) { + scalarMult(dst, in, base) +} + +// ScalarBaseMult sets dst to the product in*base where dst and base are the x +// coordinates of group points, base is the standard generator and all values +// are in little-endian form. 
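scalarMult above begins by clamping the private scalar: the low three bits are cleared so the scalar is a multiple of the cofactor 8, bit 255 is cleared, and bit 254 is set so the ladder always processes the same number of bits. The same bit twiddling pulled out as a standalone sketch (the helper name here is ours, not the package's):

```
package main

import "fmt"

// clamp reproduces the masking scalarMult applies to the private scalar:
// clear the low 3 bits (cofactor 8), clear bit 255, set bit 254 so the
// Montgomery ladder runs a fixed number of iterations.
func clamp(e *[32]byte) {
	e[0] &= 248
	e[31] &= 127
	e[31] |= 64
}

func main() {
	var e [32]byte // an all-zero scalar, for illustration
	clamp(&e)
	fmt.Printf("%08b %08b\n", e[0], e[31]) // 00000000 01000000
}
```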
+func ScalarBaseMult(dst, in *[32]byte) { + ScalarMult(dst, in, &basePoint) +} diff --git a/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s b/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s new file mode 100644 index 00000000..39081610 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s @@ -0,0 +1,73 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +#include "const_amd64.h" + +// func freeze(inout *[5]uint64) +TEXT ·freeze(SB),7,$0-8 + MOVQ inout+0(FP), DI + + MOVQ 0(DI),SI + MOVQ 8(DI),DX + MOVQ 16(DI),CX + MOVQ 24(DI),R8 + MOVQ 32(DI),R9 + MOVQ $REDMASK51,AX + MOVQ AX,R10 + SUBQ $18,R10 + MOVQ $3,R11 +REDUCELOOP: + MOVQ SI,R12 + SHRQ $51,R12 + ANDQ AX,SI + ADDQ R12,DX + MOVQ DX,R12 + SHRQ $51,R12 + ANDQ AX,DX + ADDQ R12,CX + MOVQ CX,R12 + SHRQ $51,R12 + ANDQ AX,CX + ADDQ R12,R8 + MOVQ R8,R12 + SHRQ $51,R12 + ANDQ AX,R8 + ADDQ R12,R9 + MOVQ R9,R12 + SHRQ $51,R12 + ANDQ AX,R9 + IMUL3Q $19,R12,R12 + ADDQ R12,SI + SUBQ $1,R11 + JA REDUCELOOP + MOVQ $1,R12 + CMPQ R10,SI + CMOVQLT R11,R12 + CMPQ AX,DX + CMOVQNE R11,R12 + CMPQ AX,CX + CMOVQNE R11,R12 + CMPQ AX,R8 + CMOVQNE R11,R12 + CMPQ AX,R9 + CMOVQNE R11,R12 + NEGQ R12 + ANDQ R12,AX + ANDQ R12,R10 + SUBQ R10,SI + SUBQ AX,DX + SUBQ AX,CX + SUBQ AX,R8 + SUBQ AX,R9 + MOVQ SI,0(DI) + MOVQ DX,8(DI) + MOVQ CX,16(DI) + MOVQ R8,24(DI) + MOVQ R9,32(DI) + RET diff --git a/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s b/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s new file mode 100644 index 00000000..9e9040b2 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s @@ -0,0 +1,1377 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
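With ScalarBaseMult and ScalarMult as the package's whole exported surface, a Diffie-Hellman exchange is only a few lines. A usage sketch (key storage and real error handling are elided):

```
package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/curve25519"
)

func main() {
	// Each side draws a random 32-byte scalar; scalarMult clamps it
	// internally, so any 32 bytes are acceptable input.
	var alicePriv, bobPriv [32]byte
	if _, err := rand.Read(alicePriv[:]); err != nil {
		panic(err)
	}
	if _, err := rand.Read(bobPriv[:]); err != nil {
		panic(err)
	}

	// Public keys: scalar times the base point (x = 9).
	var alicePub, bobPub [32]byte
	curve25519.ScalarBaseMult(&alicePub, &alicePriv)
	curve25519.ScalarBaseMult(&bobPub, &bobPriv)

	// Each side multiplies its scalar into the peer's public key; both
	// arrive at the same shared x coordinate.
	var shared1, shared2 [32]byte
	curve25519.ScalarMult(&shared1, &alicePriv, &bobPub)
	curve25519.ScalarMult(&shared2, &bobPriv, &alicePub)

	fmt.Println(shared1 == shared2) // true
}
```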
+ +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +#include "const_amd64.h" + +// func ladderstep(inout *[5][5]uint64) +TEXT ·ladderstep(SB),0,$296-8 + MOVQ inout+0(FP),DI + + MOVQ 40(DI),SI + MOVQ 48(DI),DX + MOVQ 56(DI),CX + MOVQ 64(DI),R8 + MOVQ 72(DI),R9 + MOVQ SI,AX + MOVQ DX,R10 + MOVQ CX,R11 + MOVQ R8,R12 + MOVQ R9,R13 + ADDQ ·_2P0(SB),AX + ADDQ ·_2P1234(SB),R10 + ADDQ ·_2P1234(SB),R11 + ADDQ ·_2P1234(SB),R12 + ADDQ ·_2P1234(SB),R13 + ADDQ 80(DI),SI + ADDQ 88(DI),DX + ADDQ 96(DI),CX + ADDQ 104(DI),R8 + ADDQ 112(DI),R9 + SUBQ 80(DI),AX + SUBQ 88(DI),R10 + SUBQ 96(DI),R11 + SUBQ 104(DI),R12 + SUBQ 112(DI),R13 + MOVQ SI,0(SP) + MOVQ DX,8(SP) + MOVQ CX,16(SP) + MOVQ R8,24(SP) + MOVQ R9,32(SP) + MOVQ AX,40(SP) + MOVQ R10,48(SP) + MOVQ R11,56(SP) + MOVQ R12,64(SP) + MOVQ R13,72(SP) + MOVQ 40(SP),AX + MULQ 40(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 40(SP),AX + SHLQ $1,AX + MULQ 48(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 40(SP),AX + SHLQ $1,AX + MULQ 56(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 40(SP),AX + SHLQ $1,AX + MULQ 64(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 40(SP),AX + SHLQ $1,AX + MULQ 72(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 48(SP),AX + MULQ 48(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 48(SP),AX + SHLQ $1,AX + MULQ 56(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 48(SP),AX + SHLQ $1,AX + MULQ 64(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 48(SP),DX + IMUL3Q $38,DX,AX + MULQ 72(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 56(SP),AX + MULQ 56(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 56(SP),DX + IMUL3Q $38,DX,AX + MULQ 64(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 56(SP),DX + IMUL3Q $38,DX,AX + MULQ 72(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 64(SP),DX + IMUL3Q $19,DX,AX + MULQ 64(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 64(SP),DX + IMUL3Q $38,DX,AX + MULQ 72(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 72(SP),DX + IMUL3Q $19,DX,AX + MULQ 72(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,80(SP) + MOVQ R8,88(SP) + MOVQ R9,96(SP) + MOVQ AX,104(SP) + MOVQ R10,112(SP) + MOVQ 0(SP),AX + MULQ 0(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 0(SP),AX + SHLQ $1,AX + MULQ 8(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 0(SP),AX + SHLQ $1,AX + MULQ 16(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 0(SP),AX + SHLQ $1,AX + MULQ 24(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 0(SP),AX + SHLQ $1,AX + MULQ 32(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 8(SP),AX + MULQ 8(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + SHLQ $1,AX + MULQ 16(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 8(SP),AX + SHLQ $1,AX + MULQ 24(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SP),DX + IMUL3Q $38,DX,AX + MULQ 32(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 16(SP),AX + MULQ 16(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 16(SP),DX + IMUL3Q $38,DX,AX + MULQ 24(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 16(SP),DX + IMUL3Q $38,DX,AX + MULQ 32(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 24(SP),DX + IMUL3Q $19,DX,AX + MULQ 24(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 
24(SP),DX + IMUL3Q $38,DX,AX + MULQ 32(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 32(SP),DX + IMUL3Q $19,DX,AX + MULQ 32(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,120(SP) + MOVQ R8,128(SP) + MOVQ R9,136(SP) + MOVQ AX,144(SP) + MOVQ R10,152(SP) + MOVQ SI,SI + MOVQ R8,DX + MOVQ R9,CX + MOVQ AX,R8 + MOVQ R10,R9 + ADDQ ·_2P0(SB),SI + ADDQ ·_2P1234(SB),DX + ADDQ ·_2P1234(SB),CX + ADDQ ·_2P1234(SB),R8 + ADDQ ·_2P1234(SB),R9 + SUBQ 80(SP),SI + SUBQ 88(SP),DX + SUBQ 96(SP),CX + SUBQ 104(SP),R8 + SUBQ 112(SP),R9 + MOVQ SI,160(SP) + MOVQ DX,168(SP) + MOVQ CX,176(SP) + MOVQ R8,184(SP) + MOVQ R9,192(SP) + MOVQ 120(DI),SI + MOVQ 128(DI),DX + MOVQ 136(DI),CX + MOVQ 144(DI),R8 + MOVQ 152(DI),R9 + MOVQ SI,AX + MOVQ DX,R10 + MOVQ CX,R11 + MOVQ R8,R12 + MOVQ R9,R13 + ADDQ ·_2P0(SB),AX + ADDQ ·_2P1234(SB),R10 + ADDQ ·_2P1234(SB),R11 + ADDQ ·_2P1234(SB),R12 + ADDQ ·_2P1234(SB),R13 + ADDQ 160(DI),SI + ADDQ 168(DI),DX + ADDQ 176(DI),CX + ADDQ 184(DI),R8 + ADDQ 192(DI),R9 + SUBQ 160(DI),AX + SUBQ 168(DI),R10 + SUBQ 176(DI),R11 + SUBQ 184(DI),R12 + SUBQ 192(DI),R13 + MOVQ SI,200(SP) + MOVQ DX,208(SP) + MOVQ CX,216(SP) + MOVQ R8,224(SP) + MOVQ R9,232(SP) + MOVQ AX,240(SP) + MOVQ R10,248(SP) + MOVQ R11,256(SP) + MOVQ R12,264(SP) + MOVQ R13,272(SP) + MOVQ 224(SP),SI + IMUL3Q $19,SI,AX + MOVQ AX,280(SP) + MULQ 56(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 232(SP),DX + IMUL3Q $19,DX,AX + MOVQ AX,288(SP) + MULQ 48(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 200(SP),AX + MULQ 40(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 200(SP),AX + MULQ 48(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 200(SP),AX + MULQ 56(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 200(SP),AX + MULQ 64(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 200(SP),AX + MULQ 72(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 208(SP),AX + MULQ 40(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 208(SP),AX + MULQ 48(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 208(SP),AX + MULQ 56(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 208(SP),AX + MULQ 64(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 208(SP),DX + IMUL3Q $19,DX,AX + MULQ 72(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 216(SP),AX + MULQ 40(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 216(SP),AX + MULQ 48(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 216(SP),AX + MULQ 56(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 216(SP),DX + IMUL3Q $19,DX,AX + MULQ 64(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 216(SP),DX + IMUL3Q $19,DX,AX + MULQ 72(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 224(SP),AX + MULQ 40(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 224(SP),AX + MULQ 48(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 280(SP),AX + MULQ 64(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 280(SP),AX + MULQ 72(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 232(SP),AX + MULQ 40(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 288(SP),AX + MULQ 56(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 288(SP),AX + MULQ 64(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 288(SP),AX + MULQ 72(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + 
SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,40(SP) + MOVQ R8,48(SP) + MOVQ R9,56(SP) + MOVQ AX,64(SP) + MOVQ R10,72(SP) + MOVQ 264(SP),SI + IMUL3Q $19,SI,AX + MOVQ AX,200(SP) + MULQ 16(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 272(SP),DX + IMUL3Q $19,DX,AX + MOVQ AX,208(SP) + MULQ 8(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 240(SP),AX + MULQ 0(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 240(SP),AX + MULQ 8(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 240(SP),AX + MULQ 16(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 240(SP),AX + MULQ 24(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 240(SP),AX + MULQ 32(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 248(SP),AX + MULQ 0(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 248(SP),AX + MULQ 8(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 248(SP),AX + MULQ 16(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 248(SP),AX + MULQ 24(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 248(SP),DX + IMUL3Q $19,DX,AX + MULQ 32(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 256(SP),AX + MULQ 0(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 256(SP),AX + MULQ 8(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 256(SP),AX + MULQ 16(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 256(SP),DX + IMUL3Q $19,DX,AX + MULQ 24(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 256(SP),DX + IMUL3Q $19,DX,AX + MULQ 32(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 264(SP),AX + MULQ 0(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 264(SP),AX + MULQ 8(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 200(SP),AX + MULQ 24(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 200(SP),AX + MULQ 32(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 272(SP),AX + MULQ 0(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 208(SP),AX + MULQ 16(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 208(SP),AX + MULQ 24(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 208(SP),AX + MULQ 32(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,DX + MOVQ R8,CX + MOVQ R9,R11 + MOVQ AX,R12 + MOVQ R10,R13 + ADDQ ·_2P0(SB),DX + ADDQ ·_2P1234(SB),CX + ADDQ ·_2P1234(SB),R11 + ADDQ ·_2P1234(SB),R12 + ADDQ ·_2P1234(SB),R13 + ADDQ 40(SP),SI + ADDQ 48(SP),R8 + ADDQ 56(SP),R9 + ADDQ 64(SP),AX + ADDQ 72(SP),R10 + SUBQ 40(SP),DX + SUBQ 48(SP),CX + SUBQ 56(SP),R11 + SUBQ 64(SP),R12 + SUBQ 72(SP),R13 + MOVQ SI,120(DI) + MOVQ R8,128(DI) + MOVQ R9,136(DI) + MOVQ AX,144(DI) + MOVQ R10,152(DI) + MOVQ DX,160(DI) + MOVQ CX,168(DI) + MOVQ R11,176(DI) + MOVQ R12,184(DI) + MOVQ R13,192(DI) + MOVQ 120(DI),AX + MULQ 120(DI) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 128(DI) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 136(DI) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 144(DI) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 152(DI) + MOVQ AX,R14 + MOVQ 
DX,R15 + MOVQ 128(DI),AX + MULQ 128(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 128(DI),AX + SHLQ $1,AX + MULQ 136(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 128(DI),AX + SHLQ $1,AX + MULQ 144(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 128(DI),DX + IMUL3Q $38,DX,AX + MULQ 152(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(DI),AX + MULQ 136(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 136(DI),DX + IMUL3Q $38,DX,AX + MULQ 144(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(DI),DX + IMUL3Q $38,DX,AX + MULQ 152(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 144(DI),DX + IMUL3Q $19,DX,AX + MULQ 144(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 144(DI),DX + IMUL3Q $38,DX,AX + MULQ 152(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 152(DI),DX + IMUL3Q $19,DX,AX + MULQ 152(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,120(DI) + MOVQ R8,128(DI) + MOVQ R9,136(DI) + MOVQ AX,144(DI) + MOVQ R10,152(DI) + MOVQ 160(DI),AX + MULQ 160(DI) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 168(DI) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 176(DI) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 184(DI) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 192(DI) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 168(DI),AX + MULQ 168(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 168(DI),AX + SHLQ $1,AX + MULQ 176(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 168(DI),AX + SHLQ $1,AX + MULQ 184(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 168(DI),DX + IMUL3Q $38,DX,AX + MULQ 192(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),AX + MULQ 176(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 176(DI),DX + IMUL3Q $38,DX,AX + MULQ 184(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),DX + IMUL3Q $38,DX,AX + MULQ 192(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 184(DI),DX + IMUL3Q $19,DX,AX + MULQ 184(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 184(DI),DX + IMUL3Q $38,DX,AX + MULQ 192(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 192(DI),DX + IMUL3Q $19,DX,AX + MULQ 192(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,160(DI) + MOVQ R8,168(DI) + MOVQ R9,176(DI) + MOVQ AX,184(DI) + MOVQ R10,192(DI) + MOVQ 184(DI),SI + IMUL3Q $19,SI,AX + MOVQ AX,0(SP) + MULQ 16(DI) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 192(DI),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 8(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 160(DI),AX + MULQ 0(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 160(DI),AX + MULQ 8(DI) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 160(DI),AX + MULQ 16(DI) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 160(DI),AX + MULQ 24(DI) + MOVQ AX,R12 + 
MOVQ DX,R13 + MOVQ 160(DI),AX + MULQ 32(DI) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 168(DI),AX + MULQ 0(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 168(DI),AX + MULQ 8(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 168(DI),AX + MULQ 16(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 168(DI),AX + MULQ 24(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 168(DI),DX + IMUL3Q $19,DX,AX + MULQ 32(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),AX + MULQ 0(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 176(DI),AX + MULQ 8(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 176(DI),AX + MULQ 16(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 176(DI),DX + IMUL3Q $19,DX,AX + MULQ 24(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),DX + IMUL3Q $19,DX,AX + MULQ 32(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 184(DI),AX + MULQ 0(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 184(DI),AX + MULQ 8(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 0(SP),AX + MULQ 24(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SP),AX + MULQ 32(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 192(DI),AX + MULQ 0(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SP),AX + MULQ 16(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 8(SP),AX + MULQ 24(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 32(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,160(DI) + MOVQ R8,168(DI) + MOVQ R9,176(DI) + MOVQ AX,184(DI) + MOVQ R10,192(DI) + MOVQ 144(SP),SI + IMUL3Q $19,SI,AX + MOVQ AX,0(SP) + MULQ 96(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 152(SP),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 88(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 120(SP),AX + MULQ 80(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 120(SP),AX + MULQ 88(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 120(SP),AX + MULQ 96(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 120(SP),AX + MULQ 104(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 120(SP),AX + MULQ 112(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 128(SP),AX + MULQ 80(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 128(SP),AX + MULQ 88(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 128(SP),AX + MULQ 96(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 128(SP),AX + MULQ 104(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 128(SP),DX + IMUL3Q $19,DX,AX + MULQ 112(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(SP),AX + MULQ 80(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 136(SP),AX + MULQ 88(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 136(SP),AX + MULQ 96(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 136(SP),DX + IMUL3Q $19,DX,AX + MULQ 104(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(SP),DX + IMUL3Q $19,DX,AX + MULQ 112(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 144(SP),AX + MULQ 80(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 144(SP),AX + MULQ 88(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 0(SP),AX + MULQ 104(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SP),AX + MULQ 112(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 152(SP),AX + MULQ 80(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SP),AX + MULQ 96(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 8(SP),AX + MULQ 104(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 112(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + 
ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,40(DI) + MOVQ R8,48(DI) + MOVQ R9,56(DI) + MOVQ AX,64(DI) + MOVQ R10,72(DI) + MOVQ 160(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + MOVQ AX,SI + MOVQ DX,CX + MOVQ 168(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,CX + MOVQ DX,R8 + MOVQ 176(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,R8 + MOVQ DX,R9 + MOVQ 184(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,R9 + MOVQ DX,R10 + MOVQ 192(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,R10 + IMUL3Q $19,DX,DX + ADDQ DX,SI + ADDQ 80(SP),SI + ADDQ 88(SP),CX + ADDQ 96(SP),R8 + ADDQ 104(SP),R9 + ADDQ 112(SP),R10 + MOVQ SI,80(DI) + MOVQ CX,88(DI) + MOVQ R8,96(DI) + MOVQ R9,104(DI) + MOVQ R10,112(DI) + MOVQ 104(DI),SI + IMUL3Q $19,SI,AX + MOVQ AX,0(SP) + MULQ 176(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 112(DI),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 168(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 80(DI),AX + MULQ 160(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 80(DI),AX + MULQ 168(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 80(DI),AX + MULQ 176(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 80(DI),AX + MULQ 184(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 80(DI),AX + MULQ 192(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 88(DI),AX + MULQ 160(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 88(DI),AX + MULQ 168(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 88(DI),AX + MULQ 176(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 88(DI),AX + MULQ 184(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 88(DI),DX + IMUL3Q $19,DX,AX + MULQ 192(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 96(DI),AX + MULQ 160(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 96(DI),AX + MULQ 168(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 96(DI),AX + MULQ 176(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 96(DI),DX + IMUL3Q $19,DX,AX + MULQ 184(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 96(DI),DX + IMUL3Q $19,DX,AX + MULQ 192(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 104(DI),AX + MULQ 160(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 104(DI),AX + MULQ 168(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 0(SP),AX + MULQ 184(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SP),AX + MULQ 192(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 112(DI),AX + MULQ 160(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SP),AX + MULQ 176(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 8(SP),AX + MULQ 184(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 192(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,80(DI) + MOVQ R8,88(DI) + MOVQ R9,96(DI) + MOVQ AX,104(DI) + MOVQ R10,112(DI) + RET diff --git a/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go b/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go new file mode 100644 index 
00000000..5822bd53 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go @@ -0,0 +1,240 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!gccgo,!appengine + +package curve25519 + +// These functions are implemented in the .s files. The names of the functions +// in the rest of the file are also taken from the SUPERCOP sources to help +// people following along. + +//go:noescape + +func cswap(inout *[5]uint64, v uint64) + +//go:noescape + +func ladderstep(inout *[5][5]uint64) + +//go:noescape + +func freeze(inout *[5]uint64) + +//go:noescape + +func mul(dest, a, b *[5]uint64) + +//go:noescape + +func square(out, in *[5]uint64) + +// mladder uses a Montgomery ladder to calculate (xr/zr) *= s. +func mladder(xr, zr *[5]uint64, s *[32]byte) { + var work [5][5]uint64 + + work[0] = *xr + setint(&work[1], 1) + setint(&work[2], 0) + work[3] = *xr + setint(&work[4], 1) + + j := uint(6) + var prevbit byte + + for i := 31; i >= 0; i-- { + for j < 8 { + bit := ((*s)[i] >> j) & 1 + swap := bit ^ prevbit + prevbit = bit + cswap(&work[1], uint64(swap)) + ladderstep(&work) + j-- + } + j = 7 + } + + *xr = work[1] + *zr = work[2] +} + +func scalarMult(out, in, base *[32]byte) { + var e [32]byte + copy(e[:], (*in)[:]) + e[0] &= 248 + e[31] &= 127 + e[31] |= 64 + + var t, z [5]uint64 + unpack(&t, base) + mladder(&t, &z, &e) + invert(&z, &z) + mul(&t, &t, &z) + pack(out, &t) +} + +func setint(r *[5]uint64, v uint64) { + r[0] = v + r[1] = 0 + r[2] = 0 + r[3] = 0 + r[4] = 0 +} + +// unpack sets r = x where r consists of 5, 51-bit limbs in little-endian +// order. +func unpack(r *[5]uint64, x *[32]byte) { + r[0] = uint64(x[0]) | + uint64(x[1])<<8 | + uint64(x[2])<<16 | + uint64(x[3])<<24 | + uint64(x[4])<<32 | + uint64(x[5])<<40 | + uint64(x[6]&7)<<48 + + r[1] = uint64(x[6])>>3 | + uint64(x[7])<<5 | + uint64(x[8])<<13 | + uint64(x[9])<<21 | + uint64(x[10])<<29 | + uint64(x[11])<<37 | + uint64(x[12]&63)<<45 + + r[2] = uint64(x[12])>>6 | + uint64(x[13])<<2 | + uint64(x[14])<<10 | + uint64(x[15])<<18 | + uint64(x[16])<<26 | + uint64(x[17])<<34 | + uint64(x[18])<<42 | + uint64(x[19]&1)<<50 + + r[3] = uint64(x[19])>>1 | + uint64(x[20])<<7 | + uint64(x[21])<<15 | + uint64(x[22])<<23 | + uint64(x[23])<<31 | + uint64(x[24])<<39 | + uint64(x[25]&15)<<47 + + r[4] = uint64(x[25])>>4 | + uint64(x[26])<<4 | + uint64(x[27])<<12 | + uint64(x[28])<<20 | + uint64(x[29])<<28 | + uint64(x[30])<<36 | + uint64(x[31]&127)<<44 +} + +// pack sets out = x where out is the usual, little-endian form of the 5, +// 51-bit limbs in x. 
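unpack and pack convert between the 32-byte little-endian encoding and five 51-bit limbs; each byte mask in unpack (x[6]&7, x[12]&63, x[19]&1, x[25]&15, x[31]&127) just cuts a limb off at its 51-bit boundary. A quick check of where each limb starts, separate from the vendored code:

```
package main

import "fmt"

func main() {
	// Limb i holds bits [51*i, 51*i+51) of the little-endian value.
	for i := 0; i < 5; i++ {
		start := 51 * i
		fmt.Printf("limb %d: bits %d..%d, starts in byte %d at bit %d\n",
			i, start, start+50, start/8, start%8)
	}
	// limb 1 starts at bit 51 = byte 6, bit 3 — which is why unpack keeps
	// only x[6]&7 (the low 3 bits of byte 6) in limb 0 and shifts the rest
	// of byte 6 into limb 1.
}
```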
+func pack(out *[32]byte, x *[5]uint64) { + t := *x + freeze(&t) + + out[0] = byte(t[0]) + out[1] = byte(t[0] >> 8) + out[2] = byte(t[0] >> 16) + out[3] = byte(t[0] >> 24) + out[4] = byte(t[0] >> 32) + out[5] = byte(t[0] >> 40) + out[6] = byte(t[0] >> 48) + + out[6] ^= byte(t[1]<<3) & 0xf8 + out[7] = byte(t[1] >> 5) + out[8] = byte(t[1] >> 13) + out[9] = byte(t[1] >> 21) + out[10] = byte(t[1] >> 29) + out[11] = byte(t[1] >> 37) + out[12] = byte(t[1] >> 45) + + out[12] ^= byte(t[2]<<6) & 0xc0 + out[13] = byte(t[2] >> 2) + out[14] = byte(t[2] >> 10) + out[15] = byte(t[2] >> 18) + out[16] = byte(t[2] >> 26) + out[17] = byte(t[2] >> 34) + out[18] = byte(t[2] >> 42) + out[19] = byte(t[2] >> 50) + + out[19] ^= byte(t[3]<<1) & 0xfe + out[20] = byte(t[3] >> 7) + out[21] = byte(t[3] >> 15) + out[22] = byte(t[3] >> 23) + out[23] = byte(t[3] >> 31) + out[24] = byte(t[3] >> 39) + out[25] = byte(t[3] >> 47) + + out[25] ^= byte(t[4]<<4) & 0xf0 + out[26] = byte(t[4] >> 4) + out[27] = byte(t[4] >> 12) + out[28] = byte(t[4] >> 20) + out[29] = byte(t[4] >> 28) + out[30] = byte(t[4] >> 36) + out[31] = byte(t[4] >> 44) +} + +// invert calculates r = x^-1 mod p using Fermat's little theorem. +func invert(r *[5]uint64, x *[5]uint64) { + var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t [5]uint64 + + square(&z2, x) /* 2 */ + square(&t, &z2) /* 4 */ + square(&t, &t) /* 8 */ + mul(&z9, &t, x) /* 9 */ + mul(&z11, &z9, &z2) /* 11 */ + square(&t, &z11) /* 22 */ + mul(&z2_5_0, &t, &z9) /* 2^5 - 2^0 = 31 */ + + square(&t, &z2_5_0) /* 2^6 - 2^1 */ + for i := 1; i < 5; i++ { /* 2^20 - 2^10 */ + square(&t, &t) + } + mul(&z2_10_0, &t, &z2_5_0) /* 2^10 - 2^0 */ + + square(&t, &z2_10_0) /* 2^11 - 2^1 */ + for i := 1; i < 10; i++ { /* 2^20 - 2^10 */ + square(&t, &t) + } + mul(&z2_20_0, &t, &z2_10_0) /* 2^20 - 2^0 */ + + square(&t, &z2_20_0) /* 2^21 - 2^1 */ + for i := 1; i < 20; i++ { /* 2^40 - 2^20 */ + square(&t, &t) + } + mul(&t, &t, &z2_20_0) /* 2^40 - 2^0 */ + + square(&t, &t) /* 2^41 - 2^1 */ + for i := 1; i < 10; i++ { /* 2^50 - 2^10 */ + square(&t, &t) + } + mul(&z2_50_0, &t, &z2_10_0) /* 2^50 - 2^0 */ + + square(&t, &z2_50_0) /* 2^51 - 2^1 */ + for i := 1; i < 50; i++ { /* 2^100 - 2^50 */ + square(&t, &t) + } + mul(&z2_100_0, &t, &z2_50_0) /* 2^100 - 2^0 */ + + square(&t, &z2_100_0) /* 2^101 - 2^1 */ + for i := 1; i < 100; i++ { /* 2^200 - 2^100 */ + square(&t, &t) + } + mul(&t, &t, &z2_100_0) /* 2^200 - 2^0 */ + + square(&t, &t) /* 2^201 - 2^1 */ + for i := 1; i < 50; i++ { /* 2^250 - 2^50 */ + square(&t, &t) + } + mul(&t, &t, &z2_50_0) /* 2^250 - 2^0 */ + + square(&t, &t) /* 2^251 - 2^1 */ + square(&t, &t) /* 2^252 - 2^2 */ + square(&t, &t) /* 2^253 - 2^3 */ + + square(&t, &t) /* 2^254 - 2^4 */ + + square(&t, &t) /* 2^255 - 2^5 */ + mul(r, &t, &z11) /* 2^255 - 21 */ +} diff --git a/vendor/golang.org/x/crypto/curve25519/mul_amd64.s b/vendor/golang.org/x/crypto/curve25519/mul_amd64.s new file mode 100644 index 00000000..5ce80a2e --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/mul_amd64.s @@ -0,0 +1,169 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
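invert's comment trail ends at the exponent 2^255 - 21, which is exactly p - 2 for p = 2^255 - 19: a Fermat inversion, x^(p-2) ≡ x^-1 (mod p). A big.Int cross-check of that bookkeeping (illustrative only, not part of the package):

```
package main

import (
	"fmt"
	"math/big"
)

func main() {
	one := big.NewInt(1)
	p := new(big.Int).Lsh(one, 255)
	p.Sub(p, big.NewInt(19)) // p = 2^255 - 19

	// The addition chain's final step multiplies by z11, landing on the
	// exponent 2^255 - 21 — exactly p - 2, as Fermat's little theorem needs.
	exp := new(big.Int).Lsh(one, 255)
	exp.Sub(exp, big.NewInt(21))
	fmt.Println(exp.Cmp(new(big.Int).Sub(p, big.NewInt(2))) == 0) // true

	// Spot-check the inverse for a small x.
	x := big.NewInt(12345)
	inv := new(big.Int).Exp(x, exp, p)
	fmt.Println(new(big.Int).Mod(new(big.Int).Mul(x, inv), p)) // 1
}
```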
+ +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +#include "const_amd64.h" + +// func mul(dest, a, b *[5]uint64) +TEXT ·mul(SB),0,$16-24 + MOVQ dest+0(FP), DI + MOVQ a+8(FP), SI + MOVQ b+16(FP), DX + + MOVQ DX,CX + MOVQ 24(SI),DX + IMUL3Q $19,DX,AX + MOVQ AX,0(SP) + MULQ 16(CX) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 32(SI),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 8(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SI),AX + MULQ 0(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SI),AX + MULQ 8(CX) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 0(SI),AX + MULQ 16(CX) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 0(SI),AX + MULQ 24(CX) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 0(SI),AX + MULQ 32(CX) + MOVQ AX,BX + MOVQ DX,BP + MOVQ 8(SI),AX + MULQ 0(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SI),AX + MULQ 8(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 8(SI),AX + MULQ 16(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SI),AX + MULQ 24(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 8(SI),DX + IMUL3Q $19,DX,AX + MULQ 32(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 16(SI),AX + MULQ 0(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 16(SI),AX + MULQ 8(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 16(SI),AX + MULQ 16(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 16(SI),DX + IMUL3Q $19,DX,AX + MULQ 24(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 16(SI),DX + IMUL3Q $19,DX,AX + MULQ 32(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 24(SI),AX + MULQ 0(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 24(SI),AX + MULQ 8(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 0(SP),AX + MULQ 24(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 0(SP),AX + MULQ 32(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 32(SI),AX + MULQ 0(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 8(SP),AX + MULQ 16(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 24(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 8(SP),AX + MULQ 32(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ $REDMASK51,SI + SHLQ $13,R9:R8 + ANDQ SI,R8 + SHLQ $13,R11:R10 + ANDQ SI,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ SI,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ SI,R14 + ADDQ R13,R14 + SHLQ $13,BP:BX + ANDQ SI,BX + ADDQ R15,BX + IMUL3Q $19,BP,DX + ADDQ DX,R8 + MOVQ R8,DX + SHRQ $51,DX + ADDQ R10,DX + MOVQ DX,CX + SHRQ $51,DX + ANDQ SI,R8 + ADDQ R12,DX + MOVQ DX,R9 + SHRQ $51,DX + ANDQ SI,CX + ADDQ R14,DX + MOVQ DX,AX + SHRQ $51,DX + ANDQ SI,R9 + ADDQ BX,DX + MOVQ DX,R10 + SHRQ $51,DX + ANDQ SI,AX + IMUL3Q $19,DX,DX + ADDQ DX,R8 + ANDQ SI,R10 + MOVQ R8,0(DI) + MOVQ CX,8(DI) + MOVQ R9,16(DI) + MOVQ AX,24(DI) + MOVQ R10,32(DI) + RET diff --git a/vendor/golang.org/x/crypto/curve25519/square_amd64.s b/vendor/golang.org/x/crypto/curve25519/square_amd64.s new file mode 100644 index 00000000..12f73734 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/square_amd64.s @@ -0,0 +1,132 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +#include "const_amd64.h" + +// func square(out, in *[5]uint64) +TEXT ·square(SB),7,$0-16 + MOVQ out+0(FP), DI + MOVQ in+8(FP), SI + + MOVQ 0(SI),AX + MULQ 0(SI) + MOVQ AX,CX + MOVQ DX,R8 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 8(SI) + MOVQ AX,R9 + MOVQ DX,R10 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 16(SI) + MOVQ AX,R11 + MOVQ DX,R12 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 24(SI) + MOVQ AX,R13 + MOVQ DX,R14 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 32(SI) + MOVQ AX,R15 + MOVQ DX,BX + MOVQ 8(SI),AX + MULQ 8(SI) + ADDQ AX,R11 + ADCQ DX,R12 + MOVQ 8(SI),AX + SHLQ $1,AX + MULQ 16(SI) + ADDQ AX,R13 + ADCQ DX,R14 + MOVQ 8(SI),AX + SHLQ $1,AX + MULQ 24(SI) + ADDQ AX,R15 + ADCQ DX,BX + MOVQ 8(SI),DX + IMUL3Q $38,DX,AX + MULQ 32(SI) + ADDQ AX,CX + ADCQ DX,R8 + MOVQ 16(SI),AX + MULQ 16(SI) + ADDQ AX,R15 + ADCQ DX,BX + MOVQ 16(SI),DX + IMUL3Q $38,DX,AX + MULQ 24(SI) + ADDQ AX,CX + ADCQ DX,R8 + MOVQ 16(SI),DX + IMUL3Q $38,DX,AX + MULQ 32(SI) + ADDQ AX,R9 + ADCQ DX,R10 + MOVQ 24(SI),DX + IMUL3Q $19,DX,AX + MULQ 24(SI) + ADDQ AX,R9 + ADCQ DX,R10 + MOVQ 24(SI),DX + IMUL3Q $38,DX,AX + MULQ 32(SI) + ADDQ AX,R11 + ADCQ DX,R12 + MOVQ 32(SI),DX + IMUL3Q $19,DX,AX + MULQ 32(SI) + ADDQ AX,R13 + ADCQ DX,R14 + MOVQ $REDMASK51,SI + SHLQ $13,R8:CX + ANDQ SI,CX + SHLQ $13,R10:R9 + ANDQ SI,R9 + ADDQ R8,R9 + SHLQ $13,R12:R11 + ANDQ SI,R11 + ADDQ R10,R11 + SHLQ $13,R14:R13 + ANDQ SI,R13 + ADDQ R12,R13 + SHLQ $13,BX:R15 + ANDQ SI,R15 + ADDQ R14,R15 + IMUL3Q $19,BX,DX + ADDQ DX,CX + MOVQ CX,DX + SHRQ $51,DX + ADDQ R9,DX + ANDQ SI,CX + MOVQ DX,R8 + SHRQ $51,DX + ADDQ R11,DX + ANDQ SI,R8 + MOVQ DX,R9 + SHRQ $51,DX + ADDQ R13,DX + ANDQ SI,R9 + MOVQ DX,AX + SHRQ $51,DX + ADDQ R15,DX + ANDQ SI,AX + MOVQ DX,R10 + SHRQ $51,DX + IMUL3Q $19,DX,DX + ADDQ DX,CX + ANDQ SI,R10 + MOVQ CX,0(DI) + MOVQ R8,8(DI) + MOVQ R9,16(DI) + MOVQ AX,24(DI) + MOVQ R10,32(DI) + RET diff --git a/vendor/golang.org/x/crypto/ocsp/ocsp.go b/vendor/golang.org/x/crypto/ocsp/ocsp.go deleted file mode 100644 index 602fefa6..00000000 --- a/vendor/golang.org/x/crypto/ocsp/ocsp.go +++ /dev/null @@ -1,592 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ocsp parses OCSP responses as specified in RFC 2560. OCSP responses -// are signed messages attesting to the validity of a certificate for a small -// period of time. This is used to manage revocation for X.509 certificates. -package ocsp // import "golang.org/x/crypto/ocsp" - -import ( - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "crypto/sha1" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "errors" - "math/big" - "time" -) - -var idPKIXOCSPBasic = asn1.ObjectIdentifier([]int{1, 3, 6, 1, 5, 5, 7, 48, 1, 1}) - -// These are internal structures that reflect the ASN.1 structure of an OCSP -// response. See RFC 2560, section 4.2. 
- -const ( - ocspSuccess = 0 - ocspMalformed = 1 - ocspInternalError = 2 - ocspTryLater = 3 - ocspSigRequired = 4 - ocspUnauthorized = 5 -) - -type certID struct { - HashAlgorithm pkix.AlgorithmIdentifier - NameHash []byte - IssuerKeyHash []byte - SerialNumber *big.Int -} - -// https://tools.ietf.org/html/rfc2560#section-4.1.1 -type ocspRequest struct { - TBSRequest tbsRequest -} - -type tbsRequest struct { - Version int `asn1:"explicit,tag:0,default:0,optional"` - RequestorName pkix.RDNSequence `asn1:"explicit,tag:1,optional"` - RequestList []request -} - -type request struct { - Cert certID -} - -type responseASN1 struct { - Status asn1.Enumerated - Response responseBytes `asn1:"explicit,tag:0"` -} - -type responseBytes struct { - ResponseType asn1.ObjectIdentifier - Response []byte -} - -type basicResponse struct { - TBSResponseData responseData - SignatureAlgorithm pkix.AlgorithmIdentifier - Signature asn1.BitString - Certificates []asn1.RawValue `asn1:"explicit,tag:0,optional"` -} - -type responseData struct { - Raw asn1.RawContent - Version int `asn1:"optional,default:1,explicit,tag:0"` - RawResponderName asn1.RawValue `asn1:"optional,explicit,tag:1"` - KeyHash []byte `asn1:"optional,explicit,tag:2"` - ProducedAt time.Time `asn1:"generalized"` - Responses []singleResponse -} - -type singleResponse struct { - CertID certID - Good asn1.Flag `asn1:"tag:0,optional"` - Revoked revokedInfo `asn1:"explicit,tag:1,optional"` - Unknown asn1.Flag `asn1:"tag:2,optional"` - ThisUpdate time.Time `asn1:"generalized"` - NextUpdate time.Time `asn1:"generalized,explicit,tag:0,optional"` -} - -type revokedInfo struct { - RevocationTime time.Time `asn1:"generalized"` - Reason int `asn1:"explicit,tag:0,optional"` -} - -var ( - oidSignatureMD2WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 2} - oidSignatureMD5WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4} - oidSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5} - oidSignatureSHA256WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11} - oidSignatureSHA384WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12} - oidSignatureSHA512WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13} - oidSignatureDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3} - oidSignatureDSAWithSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 4, 3, 2} - oidSignatureECDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1} - oidSignatureECDSAWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2} - oidSignatureECDSAWithSHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3} - oidSignatureECDSAWithSHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4} -) - -var hashOIDs = map[crypto.Hash]asn1.ObjectIdentifier{ - crypto.SHA1: asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26}), - crypto.SHA256: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 1}), - crypto.SHA384: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 2}), - crypto.SHA512: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 3}), -} - -// TODO(rlb): This is also from crypto/x509, so same comment as AGL's below -var signatureAlgorithmDetails = []struct { - algo x509.SignatureAlgorithm - oid asn1.ObjectIdentifier - pubKeyAlgo x509.PublicKeyAlgorithm - hash crypto.Hash -}{ - {x509.MD2WithRSA, oidSignatureMD2WithRSA, x509.RSA, crypto.Hash(0) /* no value for MD2 */}, - {x509.MD5WithRSA, oidSignatureMD5WithRSA, x509.RSA, crypto.MD5}, - {x509.SHA1WithRSA, oidSignatureSHA1WithRSA, x509.RSA, crypto.SHA1}, - {x509.SHA256WithRSA, 
oidSignatureSHA256WithRSA, x509.RSA, crypto.SHA256}, - {x509.SHA384WithRSA, oidSignatureSHA384WithRSA, x509.RSA, crypto.SHA384}, - {x509.SHA512WithRSA, oidSignatureSHA512WithRSA, x509.RSA, crypto.SHA512}, - {x509.DSAWithSHA1, oidSignatureDSAWithSHA1, x509.DSA, crypto.SHA1}, - {x509.DSAWithSHA256, oidSignatureDSAWithSHA256, x509.DSA, crypto.SHA256}, - {x509.ECDSAWithSHA1, oidSignatureECDSAWithSHA1, x509.ECDSA, crypto.SHA1}, - {x509.ECDSAWithSHA256, oidSignatureECDSAWithSHA256, x509.ECDSA, crypto.SHA256}, - {x509.ECDSAWithSHA384, oidSignatureECDSAWithSHA384, x509.ECDSA, crypto.SHA384}, - {x509.ECDSAWithSHA512, oidSignatureECDSAWithSHA512, x509.ECDSA, crypto.SHA512}, -} - -// TODO(rlb): This is also from crypto/x509, so same comment as AGL's below -func signingParamsForPublicKey(pub interface{}, requestedSigAlgo x509.SignatureAlgorithm) (hashFunc crypto.Hash, sigAlgo pkix.AlgorithmIdentifier, err error) { - var pubType x509.PublicKeyAlgorithm - - switch pub := pub.(type) { - case *rsa.PublicKey: - pubType = x509.RSA - hashFunc = crypto.SHA256 - sigAlgo.Algorithm = oidSignatureSHA256WithRSA - sigAlgo.Parameters = asn1.RawValue{ - Tag: 5, - } - - case *ecdsa.PublicKey: - pubType = x509.ECDSA - - switch pub.Curve { - case elliptic.P224(), elliptic.P256(): - hashFunc = crypto.SHA256 - sigAlgo.Algorithm = oidSignatureECDSAWithSHA256 - case elliptic.P384(): - hashFunc = crypto.SHA384 - sigAlgo.Algorithm = oidSignatureECDSAWithSHA384 - case elliptic.P521(): - hashFunc = crypto.SHA512 - sigAlgo.Algorithm = oidSignatureECDSAWithSHA512 - default: - err = errors.New("x509: unknown elliptic curve") - } - - default: - err = errors.New("x509: only RSA and ECDSA keys supported") - } - - if err != nil { - return - } - - if requestedSigAlgo == 0 { - return - } - - found := false - for _, details := range signatureAlgorithmDetails { - if details.algo == requestedSigAlgo { - if details.pubKeyAlgo != pubType { - err = errors.New("x509: requested SignatureAlgorithm does not match private key type") - return - } - sigAlgo.Algorithm, hashFunc = details.oid, details.hash - if hashFunc == 0 { - err = errors.New("x509: cannot sign with hash function requested") - return - } - found = true - break - } - } - - if !found { - err = errors.New("x509: unknown SignatureAlgorithm") - } - - return -} - -// TODO(agl): this is taken from crypto/x509 and so should probably be exported -// from crypto/x509 or crypto/x509/pkix. -func getSignatureAlgorithmFromOID(oid asn1.ObjectIdentifier) x509.SignatureAlgorithm { - for _, details := range signatureAlgorithmDetails { - if oid.Equal(details.oid) { - return details.algo - } - } - return x509.UnknownSignatureAlgorithm -} - -// TODO(rlb): This is not taken from crypto/x509, but it's of the same general form. -func getHashAlgorithmFromOID(target asn1.ObjectIdentifier) crypto.Hash { - for hash, oid := range hashOIDs { - if oid.Equal(target) { - return hash - } - } - return crypto.Hash(0) -} - -// This is the exposed reflection of the internal OCSP structures. - -const ( - // Good means that the certificate is valid. - Good = iota - // Revoked means that the certificate has been deliberately revoked. - Revoked = iota - // Unknown means that the OCSP responder doesn't know about the certificate. - Unknown = iota - // ServerFailed means that the OCSP responder failed to process the request. - ServerFailed = iota -) - -// Request represents an OCSP request. See RFC 2560. 
-type Request struct { - HashAlgorithm crypto.Hash - IssuerNameHash []byte - IssuerKeyHash []byte - SerialNumber *big.Int -} - -// Response represents an OCSP response. See RFC 2560. -type Response struct { - // Status is one of {Good, Revoked, Unknown, ServerFailed} - Status int - SerialNumber *big.Int - ProducedAt, ThisUpdate, NextUpdate, RevokedAt time.Time - RevocationReason int - Certificate *x509.Certificate - // TBSResponseData contains the raw bytes of the signed response. If - // Certificate is nil then this can be used to verify Signature. - TBSResponseData []byte - Signature []byte - SignatureAlgorithm x509.SignatureAlgorithm -} - -// These are pre-serialized error responses for the various non-success codes -// defined by OCSP. The Unauthorized code in particular can be used by an OCSP -// responder that supports only pre-signed responses as a response to requests -// for certificates with unknown status. See RFC 5019. -var ( - MalformedRequestErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x01} - InternalErrorErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x02} - TryLaterErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x03} - SigRequredErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x05} - UnauthorizedErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x06} -) - -// CheckSignatureFrom checks that the signature in resp is a valid signature -// from issuer. This should only be used if resp.Certificate is nil. Otherwise, -// the OCSP response contained an intermediate certificate that created the -// signature. That signature is checked by ParseResponse and only -// resp.Certificate remains to be validated. -func (resp *Response) CheckSignatureFrom(issuer *x509.Certificate) error { - return issuer.CheckSignature(resp.SignatureAlgorithm, resp.TBSResponseData, resp.Signature) -} - -// ParseError results from an invalid OCSP response. -type ParseError string - -func (p ParseError) Error() string { - return string(p) -} - -// ParseRequest parses an OCSP request in DER form. It only supports -// requests for a single certificate. Signed requests are not supported. -// If a request includes a signature, it will result in a ParseError. -func ParseRequest(bytes []byte) (*Request, error) { - var req ocspRequest - rest, err := asn1.Unmarshal(bytes, &req) - if err != nil { - return nil, err - } - if len(rest) > 0 { - return nil, ParseError("trailing data in OCSP request") - } - - if len(req.TBSRequest.RequestList) == 0 { - return nil, ParseError("OCSP request contains no request body") - } - innerRequest := req.TBSRequest.RequestList[0] - - hashFunc := getHashAlgorithmFromOID(innerRequest.Cert.HashAlgorithm.Algorithm) - if hashFunc == crypto.Hash(0) { - return nil, ParseError("OCSP request uses unknown hash function") - } - - return &Request{ - HashAlgorithm: hashFunc, - IssuerNameHash: innerRequest.Cert.NameHash, - IssuerKeyHash: innerRequest.Cert.IssuerKeyHash, - SerialNumber: innerRequest.Cert.SerialNumber, - }, nil -} - -// ParseResponse parses an OCSP response in DER form. It only supports -// responses for a single certificate. If the response contains a certificate -// then the signature over the response is checked. If issuer is not nil then -// it will be used to validate the signature or embedded certificate. Invalid -// signatures or parse failures will result in a ParseError. 
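For reference, the public surface of the package being removed here paired CreateRequest with ParseResponse for a full client round trip. A sketch of how it was typically wired up (the responder URL and certificate plumbing are assumptions, not code from this repository):

```
package main

import (
	"bytes"
	"crypto/x509"
	"fmt"
	"io/ioutil"
	"net/http"

	"golang.org/x/crypto/ocsp"
)

// checkStatus performs one OCSP round trip for cert, issued by issuer.
// It assumes the certificate carries at least one responder URL.
func checkStatus(cert, issuer *x509.Certificate) (*ocsp.Response, error) {
	// DER-encode the request; nil options default to SHA-1, the hash
	// responders near-universally expect.
	der, err := ocsp.CreateRequest(cert, issuer, nil)
	if err != nil {
		return nil, err
	}

	httpResp, err := http.Post(cert.OCSPServer[0],
		"application/ocsp-request", bytes.NewReader(der))
	if err != nil {
		return nil, err
	}
	defer httpResp.Body.Close()

	body, err := ioutil.ReadAll(httpResp.Body)
	if err != nil {
		return nil, err
	}

	// ParseResponse checks the signature using issuer (or the embedded
	// responder certificate) before returning the decoded status.
	return ocsp.ParseResponse(body, issuer)
}

func main() {
	fmt.Println("see checkStatus; wiring up real certificates is omitted")
}
```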
-func ParseResponse(bytes []byte, issuer *x509.Certificate) (*Response, error) { - var resp responseASN1 - rest, err := asn1.Unmarshal(bytes, &resp) - if err != nil { - return nil, err - } - if len(rest) > 0 { - return nil, ParseError("trailing data in OCSP response") - } - - ret := new(Response) - if resp.Status != ocspSuccess { - ret.Status = ServerFailed - return ret, nil - } - - if !resp.Response.ResponseType.Equal(idPKIXOCSPBasic) { - return nil, ParseError("bad OCSP response type") - } - - var basicResp basicResponse - rest, err = asn1.Unmarshal(resp.Response.Response, &basicResp) - if err != nil { - return nil, err - } - - if len(basicResp.Certificates) > 1 { - return nil, ParseError("OCSP response contains bad number of certificates") - } - - if len(basicResp.TBSResponseData.Responses) != 1 { - return nil, ParseError("OCSP response contains bad number of responses") - } - - ret.TBSResponseData = basicResp.TBSResponseData.Raw - ret.Signature = basicResp.Signature.RightAlign() - ret.SignatureAlgorithm = getSignatureAlgorithmFromOID(basicResp.SignatureAlgorithm.Algorithm) - - if len(basicResp.Certificates) > 0 { - ret.Certificate, err = x509.ParseCertificate(basicResp.Certificates[0].FullBytes) - if err != nil { - return nil, err - } - - if err := ret.CheckSignatureFrom(ret.Certificate); err != nil { - return nil, ParseError("bad OCSP signature") - } - - if issuer != nil { - if err := issuer.CheckSignature(ret.Certificate.SignatureAlgorithm, ret.Certificate.RawTBSCertificate, ret.Certificate.Signature); err != nil { - return nil, ParseError("bad signature on embedded certificate") - } - } - } else if issuer != nil { - if err := ret.CheckSignatureFrom(issuer); err != nil { - return nil, ParseError("bad OCSP signature") - } - } - - r := basicResp.TBSResponseData.Responses[0] - - ret.SerialNumber = r.CertID.SerialNumber - - switch { - case bool(r.Good): - ret.Status = Good - case bool(r.Unknown): - ret.Status = Unknown - default: - ret.Status = Revoked - ret.RevokedAt = r.Revoked.RevocationTime - ret.RevocationReason = r.Revoked.Reason - } - - ret.ProducedAt = basicResp.TBSResponseData.ProducedAt - ret.ThisUpdate = r.ThisUpdate - ret.NextUpdate = r.NextUpdate - - return ret, nil -} - -// RequestOptions contains options for constructing OCSP requests. -type RequestOptions struct { - // Hash contains the hash function that should be used when - // constructing the OCSP request. If zero, SHA-1 will be used. - Hash crypto.Hash -} - -func (opts *RequestOptions) hash() crypto.Hash { - if opts == nil || opts.Hash == 0 { - // SHA-1 is nearly universally used in OCSP. - return crypto.SHA1 - } - return opts.Hash -} - -// CreateRequest returns a DER-encoded, OCSP request for the status of cert. If -// opts is nil then sensible defaults are used. -func CreateRequest(cert, issuer *x509.Certificate, opts *RequestOptions) ([]byte, error) { - hashFunc := opts.hash() - - // OCSP seems to be the only place where these raw hash identifiers are - // used. 
I took the following from - // http://msdn.microsoft.com/en-us/library/ff635603.aspx - var hashOID asn1.ObjectIdentifier - hashOID, ok := hashOIDs[hashFunc] - if !ok { - return nil, x509.ErrUnsupportedAlgorithm - } - - if !hashFunc.Available() { - return nil, x509.ErrUnsupportedAlgorithm - } - h := opts.hash().New() - - var publicKeyInfo struct { - Algorithm pkix.AlgorithmIdentifier - PublicKey asn1.BitString - } - if _, err := asn1.Unmarshal(issuer.RawSubjectPublicKeyInfo, &publicKeyInfo); err != nil { - return nil, err - } - - h.Write(publicKeyInfo.PublicKey.RightAlign()) - issuerKeyHash := h.Sum(nil) - - h.Reset() - h.Write(issuer.RawSubject) - issuerNameHash := h.Sum(nil) - - return asn1.Marshal(ocspRequest{ - tbsRequest{ - Version: 0, - RequestList: []request{ - { - Cert: certID{ - pkix.AlgorithmIdentifier{ - Algorithm: hashOID, - Parameters: asn1.RawValue{Tag: 5 /* ASN.1 NULL */}, - }, - issuerNameHash, - issuerKeyHash, - cert.SerialNumber, - }, - }, - }, - }, - }) -} - -// CreateResponse returns a DER-encoded OCSP response with the specified contents. -// The fields in the response are populated as follows: -// -// The responder cert is used to populate the ResponderName field, and the certificate -// itself is provided alongside the OCSP response signature. -// -// The issuer cert is used to puplate the IssuerNameHash and IssuerKeyHash fields. -// (SHA-1 is used for the hash function; this is not configurable.) -// -// The template is used to populate the SerialNumber, RevocationStatus, RevokedAt, -// RevocationReason, ThisUpdate, and NextUpdate fields. -// -// The ProducedAt date is automatically set to the current date, to the nearest minute. -func CreateResponse(issuer, responderCert *x509.Certificate, template Response, priv crypto.Signer) ([]byte, error) { - var publicKeyInfo struct { - Algorithm pkix.AlgorithmIdentifier - PublicKey asn1.BitString - } - if _, err := asn1.Unmarshal(issuer.RawSubjectPublicKeyInfo, &publicKeyInfo); err != nil { - return nil, err - } - - h := sha1.New() - h.Write(publicKeyInfo.PublicKey.RightAlign()) - issuerKeyHash := h.Sum(nil) - - h.Reset() - h.Write(issuer.RawSubject) - issuerNameHash := h.Sum(nil) - - innerResponse := singleResponse{ - CertID: certID{ - HashAlgorithm: pkix.AlgorithmIdentifier{ - Algorithm: hashOIDs[crypto.SHA1], - Parameters: asn1.RawValue{Tag: 5 /* ASN.1 NULL */}, - }, - NameHash: issuerNameHash, - IssuerKeyHash: issuerKeyHash, - SerialNumber: template.SerialNumber, - }, - ThisUpdate: template.ThisUpdate.UTC(), - NextUpdate: template.NextUpdate.UTC(), - } - - switch template.Status { - case Good: - innerResponse.Good = true - case Unknown: - innerResponse.Unknown = true - case Revoked: - innerResponse.Revoked = revokedInfo{ - RevocationTime: template.RevokedAt.UTC(), - Reason: template.RevocationReason, - } - } - - responderName := asn1.RawValue{ - Class: 2, // context-specific - Tag: 1, // explicit tag - IsCompound: true, - Bytes: responderCert.RawSubject, - } - tbsResponseData := responseData{ - Version: 0, - RawResponderName: responderName, - ProducedAt: time.Now().Truncate(time.Minute).UTC(), - Responses: []singleResponse{innerResponse}, - } - - tbsResponseDataDER, err := asn1.Marshal(tbsResponseData) - if err != nil { - return nil, err - } - - hashFunc, signatureAlgorithm, err := signingParamsForPublicKey(priv.Public(), template.SignatureAlgorithm) - if err != nil { - return nil, err - } - - responseHash := hashFunc.New() - responseHash.Write(tbsResponseDataDER) - signature, err := priv.Sign(rand.Reader, 
responseHash.Sum(nil), hashFunc) - if err != nil { - return nil, err - } - - response := basicResponse{ - TBSResponseData: tbsResponseData, - SignatureAlgorithm: signatureAlgorithm, - Signature: asn1.BitString{ - Bytes: signature, - BitLength: 8 * len(signature), - }, - } - if template.Certificate != nil { - response.Certificates = []asn1.RawValue{ - asn1.RawValue{FullBytes: template.Certificate.Raw}, - } - } - responseDER, err := asn1.Marshal(response) - if err != nil { - return nil, err - } - - return asn1.Marshal(responseASN1{ - Status: ocspSuccess, - Response: responseBytes{ - ResponseType: idPKIXOCSPBasic, - Response: responseDER, - }, - }) -} diff --git a/vendor/golang.org/x/crypto/otr/libotr_test_helper.c b/vendor/golang.org/x/crypto/otr/libotr_test_helper.c index 6423eeda..b3ca072d 100644 --- a/vendor/golang.org/x/crypto/otr/libotr_test_helper.c +++ b/vendor/golang.org/x/crypto/otr/libotr_test_helper.c @@ -13,6 +13,7 @@ #include #include +#include static int g_session_established = 0; @@ -20,92 +21,109 @@ OtrlPolicy policy(void *opdata, ConnContext *context) { return OTRL_POLICY_ALWAYS; } -int is_logged_in(void *opdata, const char *accountname, const char *protocol, const char *recipient) { +int is_logged_in(void *opdata, const char *accountname, const char *protocol, + const char *recipient) { return 1; } -void inject_message(void *opdata, const char *accountname, const char *protocol, const char *recipient, const char *message) { +void inject_message(void *opdata, const char *accountname, const char *protocol, + const char *recipient, const char *message) { printf("%s\n", message); fflush(stdout); fprintf(stderr, "libotr helper sent: %s\n", message); } -void notify(void *opdata, OtrlNotifyLevel level, const char *accountname, const char *protocol, const char *username, const char *title, const char *primary, const char *secondary) { - fprintf(stderr, "NOTIFY: %s %s %s %s\n", username, title, primary, secondary); +void update_context_list(void *opdata) {} + +void new_fingerprint(void *opdata, OtrlUserState us, const char *accountname, + const char *protocol, const char *username, + unsigned char fingerprint[20]) { + fprintf(stderr, "NEW FINGERPRINT\n"); + g_session_established = 1; } -int display_otr_message(void *opdata, const char *accountname, const char *protocol, const char *username, const char *msg) { - fprintf(stderr, "MESSAGE: %s %s\n", username, msg); - return 1; +void write_fingerprints(void *opdata) {} + +void gone_secure(void *opdata, ConnContext *context) {} + +void gone_insecure(void *opdata, ConnContext *context) {} + +void still_secure(void *opdata, ConnContext *context, int is_reply) {} + +int max_message_size(void *opdata, ConnContext *context) { return 99999; } + +const char *account_name(void *opdata, const char *account, + const char *protocol) { + return "ACCOUNT"; } -void update_context_list(void *opdata) { +void account_name_free(void *opdata, const char *account_name) {} + +const char *error_message(void *opdata, ConnContext *context, + OtrlErrorCode err_code) { + return "ERR"; } -const char *protocol_name(void *opdata, const char *protocol) { - return "PROTOCOL"; -} +void error_message_free(void *opdata, const char *msg) {} -void protocol_name_free(void *opdata, const char *protocol_name) { -} +void resent_msg_prefix_free(void *opdata, const char *prefix) {} -void new_fingerprint(void *opdata, OtrlUserState us, const char *accountname, const char *protocol, const char *username, unsigned char fingerprint[20]) { - fprintf(stderr, "NEW FINGERPRINT\n"); - 
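The `golang.org/x/crypto/ocsp` code above exposes a small client-side surface: `CreateRequest` builds the DER-encoded request and `ParseResponse` verifies and decodes the answer. A minimal sketch of how the two fit together, assuming `cert` and `issuer` are already-parsed certificates and that the certificate lists a responder URL in `cert.OCSPServer`; the HTTP transport details here are illustrative, not part of the package:

```go
package ocspcheck

import (
	"bytes"
	"crypto/x509"
	"errors"
	"io/ioutil"
	"net/http"

	"golang.org/x/crypto/ocsp"
)

// checkStatus asks the certificate's OCSP responder whether cert has
// been revoked. Passing nil options to CreateRequest selects the SHA-1
// default; passing issuer to ParseResponse makes it verify the
// responder's signature.
func checkStatus(cert, issuer *x509.Certificate) (*ocsp.Response, error) {
	if len(cert.OCSPServer) == 0 {
		return nil, errors.New("certificate carries no OCSP responder URL")
	}
	der, err := ocsp.CreateRequest(cert, issuer, nil)
	if err != nil {
		return nil, err
	}
	resp, err := http.Post(cert.OCSPServer[0], "application/ocsp-request", bytes.NewReader(der))
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	raw, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	return ocsp.ParseResponse(raw, issuer)
}
```

The `Status` field of the returned response then carries `Good`, `Revoked`, or `Unknown`, exactly as populated by the `singleResponse` parsing shown above.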
g_session_established = 1; -} +void handle_smp_event(void *opdata, OtrlSMPEvent smp_event, + ConnContext *context, unsigned short progress_event, + char *question) {} -void write_fingerprints(void *opdata) { -} - -void gone_secure(void *opdata, ConnContext *context) { -} - -void gone_insecure(void *opdata, ConnContext *context) { -} - -void still_secure(void *opdata, ConnContext *context, int is_reply) { -} - -void log_message(void *opdata, const char *message) { - fprintf(stderr, "MESSAGE: %s\n", message); -} - -int max_message_size(void *opdata, ConnContext *context) { - return 99999; -} - -const char *account_name(void *opdata, const char *account, const char *protocol) { - return "ACCOUNT"; -} - -void account_name_free(void *opdata, const char *account_name) { +void handle_msg_event(void *opdata, OtrlMessageEvent msg_event, + ConnContext *context, const char *message, + gcry_error_t err) { + fprintf(stderr, "msg event: %d %s\n", msg_event, message); } OtrlMessageAppOps uiops = { - policy, - NULL, - is_logged_in, - inject_message, - notify, - display_otr_message, - update_context_list, - protocol_name, - protocol_name_free, - new_fingerprint, - write_fingerprints, - gone_secure, - gone_insecure, - still_secure, - log_message, - max_message_size, - account_name, - account_name_free, + policy, + NULL, + is_logged_in, + inject_message, + update_context_list, + new_fingerprint, + write_fingerprints, + gone_secure, + gone_insecure, + still_secure, + max_message_size, + account_name, + account_name_free, + NULL, /* received_symkey */ + error_message, + error_message_free, + NULL, /* resent_msg_prefix */ + resent_msg_prefix_free, + handle_smp_event, + handle_msg_event, + NULL /* create_instag */, + NULL /* convert_msg */, + NULL /* convert_free */, + NULL /* timer_control */, }; -static const char kPrivateKeyData[] = "(privkeys (account (name \"account\") (protocol proto) (private-key (dsa (p #00FC07ABCF0DC916AFF6E9AE47BEF60C7AB9B4D6B2469E436630E36F8A489BE812486A09F30B71224508654940A835301ACC525A4FF133FC152CC53DCC59D65C30A54F1993FE13FE63E5823D4C746DB21B90F9B9C00B49EC7404AB1D929BA7FBA12F2E45C6E0A651689750E8528AB8C031D3561FECEE72EBB4A090D450A9B7A857#) (q #00997BD266EF7B1F60A5C23F3A741F2AEFD07A2081#) (g #535E360E8A95EBA46A4F7DE50AD6E9B2A6DB785A66B64EB9F20338D2A3E8FB0E94725848F1AA6CC567CB83A1CC517EC806F2E92EAE71457E80B2210A189B91250779434B41FC8A8873F6DB94BEA7D177F5D59E7E114EE10A49CFD9CEF88AE43387023B672927BA74B04EB6BBB5E57597766A2F9CE3857D7ACE3E1E3BC1FC6F26#) (y #0AC8670AD767D7A8D9D14CC1AC6744CD7D76F993B77FFD9E39DF01E5A6536EF65E775FCEF2A983E2A19BD6415500F6979715D9FD1257E1FE2B6F5E1E74B333079E7C880D39868462A93454B41877BE62E5EF0A041C2EE9C9E76BD1E12AE25D9628DECB097025DD625EF49C3258A1A3C0FF501E3DC673B76D7BABF349009B6ECF#) (x #14D0345A3562C480A039E3C72764F72D79043216#)))))\n"; +static const char kPrivateKeyData[] = + "(privkeys (account (name \"account\") (protocol proto) (private-key (dsa " + "(p " + "#00FC07ABCF0DC916AFF6E9AE47BEF60C7AB9B4D6B2469E436630E36F8A489BE812486A09F" + "30B71224508654940A835301ACC525A4FF133FC152CC53DCC59D65C30A54F1993FE13FE63E" + "5823D4C746DB21B90F9B9C00B49EC7404AB1D929BA7FBA12F2E45C6E0A651689750E8528AB" + "8C031D3561FECEE72EBB4A090D450A9B7A857#) (q " + "#00997BD266EF7B1F60A5C23F3A741F2AEFD07A2081#) (g " + "#535E360E8A95EBA46A4F7DE50AD6E9B2A6DB785A66B64EB9F20338D2A3E8FB0E94725848F" + "1AA6CC567CB83A1CC517EC806F2E92EAE71457E80B2210A189B91250779434B41FC8A8873F" + "6DB94BEA7D177F5D59E7E114EE10A49CFD9CEF88AE43387023B672927BA74B04EB6BBB5E57" + 
"597766A2F9CE3857D7ACE3E1E3BC1FC6F26#) (y " + "#0AC8670AD767D7A8D9D14CC1AC6744CD7D76F993B77FFD9E39DF01E5A6536EF65E775FCEF" + "2A983E2A19BD6415500F6979715D9FD1257E1FE2B6F5E1E74B333079E7C880D39868462A93" + "454B41877BE62E5EF0A041C2EE9C9E76BD1E12AE25D9628DECB097025DD625EF49C3258A1A" + "3C0FF501E3DC673B76D7BABF349009B6ECF#) (x " + "#14D0345A3562C480A039E3C72764F72D79043216#)))))\n"; -int -main() { +int main() { OTRL_INIT; // We have to write the private key information to a file because the libotr @@ -116,12 +134,13 @@ main() { } char private_key_file[256]; - snprintf(private_key_file, sizeof(private_key_file), "%s/libotr_test_helper_privatekeys-XXXXXX", tmpdir); + snprintf(private_key_file, sizeof(private_key_file), + "%s/libotr_test_helper_privatekeys-XXXXXX", tmpdir); int fd = mkstemp(private_key_file); if (fd == -1) { perror("creating temp file"); } - write(fd, kPrivateKeyData, sizeof(kPrivateKeyData)-1); + write(fd, kPrivateKeyData, sizeof(kPrivateKeyData) - 1); close(fd); OtrlUserState userstate = otrl_userstate_create(); @@ -133,7 +152,7 @@ main() { char buf[4096]; for (;;) { - char* message = fgets(buf, sizeof(buf), stdin); + char *message = fgets(buf, sizeof(buf), stdin); if (strlen(message) == 0) { break; } @@ -142,9 +161,11 @@ main() { char *newmessage = NULL; OtrlTLV *tlvs; - int ignore_message = otrl_message_receiving(userstate, &uiops, NULL, "account", "proto", "peer", message, &newmessage, &tlvs, NULL, NULL); + int ignore_message = otrl_message_receiving( + userstate, &uiops, NULL, "account", "proto", "peer", message, + &newmessage, &tlvs, NULL, NULL, NULL); if (tlvs) { - otrl_tlv_free(tlvs); + otrl_tlv_free(tlvs); } if (newmessage != NULL) { @@ -154,16 +175,21 @@ main() { gcry_error_t err; char *newmessage = NULL; - err = otrl_message_sending(userstate, &uiops, NULL, "account", "proto", "peer", "test message", NULL, &newmessage, NULL, NULL); + err = otrl_message_sending(userstate, &uiops, NULL, "account", "proto", + "peer", 0, "test message", NULL, &newmessage, + OTRL_FRAGMENT_SEND_SKIP, NULL, NULL, NULL); if (newmessage == NULL) { fprintf(stderr, "libotr didn't encrypt message\n"); return 1; } write(1, newmessage, strlen(newmessage)); write(1, "\n", 1); - g_session_established = 0; + fprintf(stderr, "libotr sent: %s\n", newmessage); otrl_message_free(newmessage); + + g_session_established = 0; write(1, "?OTRv2?\n", 8); + fprintf(stderr, "libotr sent: ?OTRv2\n"); } } diff --git a/vendor/golang.org/x/crypto/otr/otr.go b/vendor/golang.org/x/crypto/otr/otr.go index 07ac080e..173b753d 100644 --- a/vendor/golang.org/x/crypto/otr/otr.go +++ b/vendor/golang.org/x/crypto/otr/otr.go @@ -277,7 +277,7 @@ func (c *Conversation) Receive(in []byte) (out []byte, encrypted bool, change Se in = in[len(msgPrefix) : len(in)-1] } else if version := isQuery(in); version > 0 { c.authState = authStateAwaitingDHKey - c.myKeyId = 0 + c.reset() toSend = c.encode(c.generateDHCommit()) return } else { @@ -311,7 +311,7 @@ func (c *Conversation) Receive(in []byte) (out []byte, encrypted bool, change Se if err = c.processDHCommit(msg); err != nil { return } - c.myKeyId = 0 + c.reset() toSend = c.encode(c.generateDHKey()) return case authStateAwaitingDHKey: @@ -330,7 +330,7 @@ func (c *Conversation) Receive(in []byte) (out []byte, encrypted bool, change Se if err = c.processDHCommit(msg); err != nil { return } - c.myKeyId = 0 + c.reset() toSend = c.encode(c.generateDHKey()) return } @@ -343,7 +343,7 @@ func (c *Conversation) Receive(in []byte) (out []byte, encrypted bool, change Se if err = 
c.processDHCommit(msg); err != nil { return } - c.myKeyId = 0 + c.reset() toSend = c.encode(c.generateDHKey()) c.authState = authStateAwaitingRevealSig default: @@ -417,12 +417,11 @@ func (c *Conversation) Receive(in []byte) (out []byte, encrypted bool, change Se change = SMPSecretNeeded c.smp.saved = &inTLV return - } else if err == smpFailureError { + } + if err == smpFailureError { err = nil change = SMPFailed - return - } - if complete { + } else if complete { change = SMPComplete } if reply.typ != 0 { @@ -848,7 +847,6 @@ func (c *Conversation) rotateDHKeys() { slot := &c.keySlots[i] if slot.used && slot.myKeyId == c.myKeyId-1 { slot.used = false - c.oldMACs = append(c.oldMACs, slot.sendMACKey...) c.oldMACs = append(c.oldMACs, slot.recvMACKey...) } } @@ -924,7 +922,6 @@ func (c *Conversation) processData(in []byte) (out []byte, tlvs []tlv, err error slot := &c.keySlots[i] if slot.used && slot.theirKeyId == theirKeyId-1 { slot.used = false - c.oldMACs = append(c.oldMACs, slot.sendMACKey...) c.oldMACs = append(c.oldMACs, slot.recvMACKey...) } } @@ -946,6 +943,7 @@ func (c *Conversation) processData(in []byte) (out []byte, tlvs []tlv, err error t.data, tlvData, ok3 = getNBytes(tlvData, int(t.length)) if !ok1 || !ok2 || !ok3 { err = errors.New("otr: corrupt tlv data") + return } tlvs = append(tlvs, t) } @@ -1039,8 +1037,7 @@ func (c *Conversation) calcDataKeys(myKeyId, theirKeyId uint32) (slot *keySlot, } } if slot == nil { - err = errors.New("otr: internal error: no key slots") - return + return nil, errors.New("otr: internal error: no more key slots") } var myPriv, myPub, theirPub *big.Int @@ -1096,6 +1093,10 @@ func (c *Conversation) calcDataKeys(myKeyId, theirKeyId uint32) (slot *keySlot, h.Write(slot.recvAESKey) slot.recvMACKey = h.Sum(slot.recvMACKey[:0]) + slot.theirKeyId = theirKeyId + slot.myKeyId = myKeyId + slot.used = true + zero(slot.theirLastCtr[:]) return } @@ -1162,6 +1163,14 @@ func (c *Conversation) encode(msg []byte) [][]byte { return ret } +func (c *Conversation) reset() { + c.myKeyId = 0 + + for i := range c.keySlots { + c.keySlots[i].used = false + } +} + type PublicKey struct { dsa.PublicKey } @@ -1305,6 +1314,12 @@ func (priv *PrivateKey) Import(in []byte) bool { mpis[i] = new(big.Int).SetBytes(mpiBytes) } + for _, mpi := range mpis { + if mpi.Sign() <= 0 { + return false + } + } + priv.PrivateKey.P = mpis[0] priv.PrivateKey.Q = mpis[1] priv.PrivateKey.G = mpis[2] diff --git a/vendor/golang.org/x/crypto/ssh/test/doc.go b/vendor/golang.org/x/crypto/ssh/test/doc.go new file mode 100644 index 00000000..198f0ca1 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/test/doc.go @@ -0,0 +1,7 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package test contains integration tests for the +// golang.org/x/crypto/ssh package. +package test // import "golang.org/x/crypto/ssh/test" diff --git a/vendor/golang.org/x/crypto/ssh/test/sshd_test_pw.c b/vendor/golang.org/x/crypto/ssh/test/sshd_test_pw.c new file mode 100644 index 00000000..2794a563 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/test/sshd_test_pw.c @@ -0,0 +1,173 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
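The `otr.go` change above adds a sanity check to `PrivateKey.Import`: every decoded DSA parameter must be strictly positive before the key is accepted. The sketch below isolates that idea; the hex input format and the function name are illustrative, not the otr package's actual S-expression parser:

```go
package otrsketch

import (
	"encoding/hex"
	"errors"
	"math/big"
)

// parseDSAParams decodes hex-encoded DSA parameters (p, q, g, y, x) and
// rejects any non-positive value. big.Int.SetBytes always produces a
// non-negative integer, so Sign() <= 0 is really catching empty or
// all-zero encodings, which would yield an unusable key.
func parseDSAParams(hexValues []string) ([]*big.Int, error) {
	mpis := make([]*big.Int, len(hexValues))
	for i, hv := range hexValues {
		raw, err := hex.DecodeString(hv)
		if err != nil {
			return nil, err
		}
		mpis[i] = new(big.Int).SetBytes(raw)
		if mpis[i].Sign() <= 0 {
			return nil, errors.New("otr: non-positive DSA parameter")
		}
	}
	return mpis, nil
}
```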
+ +// sshd_test_pw.c +// Wrapper to inject test password data for sshd PAM authentication +// +// This wrapper implements custom versions of getpwnam, getpwnam_r, +// getspnam and getspnam_r. These functions first call their real +// libc versions, then check if the requested user matches test user +// specified in env variable TEST_USER and if so replace the password +// with crypted() value of TEST_PASSWD env variable. +// +// Compile: +// gcc -Wall -shared -o sshd_test_pw.so -fPIC sshd_test_pw.c +// +// Compile with debug: +// gcc -DVERBOSE -Wall -shared -o sshd_test_pw.so -fPIC sshd_test_pw.c +// +// Run sshd: +// LD_PRELOAD="sshd_test_pw.so" TEST_USER="..." TEST_PASSWD="..." sshd ... + +// +build ignore + +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include + +#ifdef VERBOSE +#define DEBUG(X...) fprintf(stderr, X) +#else +#define DEBUG(X...) while (0) { } +#endif + +/* crypt() password */ +static char * +pwhash(char *passwd) { + return strdup(crypt(passwd, "$6$")); +} + +/* Pointers to real functions in libc */ +static struct passwd * (*real_getpwnam)(const char *) = NULL; +static int (*real_getpwnam_r)(const char *, struct passwd *, char *, size_t, struct passwd **) = NULL; +static struct spwd * (*real_getspnam)(const char *) = NULL; +static int (*real_getspnam_r)(const char *, struct spwd *, char *, size_t, struct spwd **) = NULL; + +/* Cached test user and test password */ +static char *test_user = NULL; +static char *test_passwd_hash = NULL; + +static void +init(void) { + /* Fetch real libc function pointers */ + real_getpwnam = dlsym(RTLD_NEXT, "getpwnam"); + real_getpwnam_r = dlsym(RTLD_NEXT, "getpwnam_r"); + real_getspnam = dlsym(RTLD_NEXT, "getspnam"); + real_getspnam_r = dlsym(RTLD_NEXT, "getspnam_r"); + + /* abort if env variables are not defined */ + if (getenv("TEST_USER") == NULL || getenv("TEST_PASSWD") == NULL) { + fprintf(stderr, "env variables TEST_USER and TEST_PASSWD are missing\n"); + abort(); + } + + /* Fetch test user and test password from env */ + test_user = strdup(getenv("TEST_USER")); + test_passwd_hash = pwhash(getenv("TEST_PASSWD")); + + DEBUG("sshd_test_pw init():\n"); + DEBUG("\treal_getpwnam: %p\n", real_getpwnam); + DEBUG("\treal_getpwnam_r: %p\n", real_getpwnam_r); + DEBUG("\treal_getspnam: %p\n", real_getspnam); + DEBUG("\treal_getspnam_r: %p\n", real_getspnam_r); + DEBUG("\tTEST_USER: '%s'\n", test_user); + DEBUG("\tTEST_PASSWD: '%s'\n", getenv("TEST_PASSWD")); + DEBUG("\tTEST_PASSWD_HASH: '%s'\n", test_passwd_hash); +} + +static int +is_test_user(const char *name) { + if (test_user != NULL && strcmp(test_user, name) == 0) + return 1; + return 0; +} + +/* getpwnam */ + +struct passwd * +getpwnam(const char *name) { + struct passwd *pw; + + DEBUG("sshd_test_pw getpwnam(%s)\n", name); + + if (real_getpwnam == NULL) + init(); + if ((pw = real_getpwnam(name)) == NULL) + return NULL; + + if (is_test_user(name)) + pw->pw_passwd = strdup(test_passwd_hash); + + return pw; +} + +/* getpwnam_r */ + +int +getpwnam_r(const char *name, + struct passwd *pwd, + char *buf, + size_t buflen, + struct passwd **result) { + int r; + + DEBUG("sshd_test_pw getpwnam_r(%s)\n", name); + + if (real_getpwnam_r == NULL) + init(); + if ((r = real_getpwnam_r(name, pwd, buf, buflen, result)) != 0 || *result == NULL) + return r; + + if (is_test_user(name)) + pwd->pw_passwd = strdup(test_passwd_hash); + + return 0; +} + +/* getspnam */ + +struct spwd * +getspnam(const char *name) { + struct spwd *sp; + + DEBUG("sshd_test_pw getspnam(%s)\n", 
name); + + if (real_getspnam == NULL) + init(); + if ((sp = real_getspnam(name)) == NULL) + return NULL; + + if (is_test_user(name)) + sp->sp_pwdp = strdup(test_passwd_hash); + + return sp; +} + +/* getspnam_r */ + +int +getspnam_r(const char *name, + struct spwd *spbuf, + char *buf, + size_t buflen, + struct spwd **spbufp) { + int r; + + DEBUG("sshd_test_pw getspnam_r(%s)\n", name); + + if (real_getspnam_r == NULL) + init(); + if ((r = real_getspnam_r(name, spbuf, buf, buflen, spbufp)) != 0) + return r; + + if (is_test_user(name)) + spbuf->sp_pwdp = strdup(test_passwd_hash); + + return r; +} diff --git a/vendor/golang.org/x/net/publicsuffix/list.go b/vendor/golang.org/x/net/publicsuffix/list.go deleted file mode 100644 index 9419ca99..00000000 --- a/vendor/golang.org/x/net/publicsuffix/list.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package publicsuffix provides a public suffix list based on data from -// http://publicsuffix.org/. A public suffix is one under which Internet users -// can directly register names. -package publicsuffix // import "golang.org/x/net/publicsuffix" - -// TODO: specify case sensitivity and leading/trailing dot behavior for -// func PublicSuffix and func EffectiveTLDPlusOne. - -import ( - "fmt" - "net/http/cookiejar" - "strings" -) - -// List implements the cookiejar.PublicSuffixList interface by calling the -// PublicSuffix function. -var List cookiejar.PublicSuffixList = list{} - -type list struct{} - -func (list) PublicSuffix(domain string) string { - ps, _ := PublicSuffix(domain) - return ps -} - -func (list) String() string { - return version -} - -// PublicSuffix returns the public suffix of the domain using a copy of the -// publicsuffix.org database compiled into the library. -// -// icann is whether the public suffix is managed by the Internet Corporation -// for Assigned Names and Numbers. If not, the public suffix is privately -// managed. For example, foo.org and foo.co.uk are ICANN domains, -// foo.dyndns.org and foo.blogspot.co.uk are private domains. -// -// Use cases for distinguishing ICANN domains like foo.com from private -// domains like foo.appspot.com can be found at -// https://wiki.mozilla.org/Public_Suffix_List/Use_Cases -func PublicSuffix(domain string) (publicSuffix string, icann bool) { - lo, hi := uint32(0), uint32(numTLD) - s, suffix, wildcard := domain, len(domain), false -loop: - for { - dot := strings.LastIndex(s, ".") - if wildcard { - suffix = 1 + dot - } - if lo == hi { - break - } - f := find(s[1+dot:], lo, hi) - if f == notFound { - break - } - - u := nodes[f] >> (nodesBitsTextOffset + nodesBitsTextLength) - icann = u&(1<>= nodesBitsICANN - u = children[u&(1<>= childrenBitsLo - hi = u & (1<>= childrenBitsHi - switch u & (1<>= childrenBitsNodeType - wildcard = u&(1<>= nodesBitsTextLength - offset := x & (1< burst { - tokens = burst - } - // update state - r.lim.last = now - r.lim.tokens = tokens - if r.timeToAct == r.lim.lastEvent { - prevEvent := r.timeToAct.Add(r.limit.durationFromTokens(float64(-r.tokens))) - if !prevEvent.Before(now) { - r.lim.lastEvent = prevEvent - } - } - - return -} - -// Reserve is shorthand for ReserveN(time.Now(), 1). -func (lim *Limiter) Reserve() *Reservation { - return lim.ReserveN(time.Now(), 1) -} - -// ReserveN returns a Reservation that indicates how long the caller must wait before n events happen. 
-// The Limiter takes this Reservation into account when allowing future events. -// ReserveN returns false if n exceeds the Limiter's burst size. -// Usage example: -// r, ok := lim.ReserveN(time.Now(), 1) -// if !ok { -// // Not allowed to act! Did you remember to set lim.burst to be > 0 ? -// } -// time.Sleep(r.Delay()) -// Act() -// Use this method if you wish to wait and slow down in accordance with the rate limit without dropping events. -// If you need to respect a deadline or cancel the delay, use Wait instead. -// To drop or skip events exceeding rate limit, use Allow instead. -func (lim *Limiter) ReserveN(now time.Time, n int) *Reservation { - r := lim.reserveN(now, n, InfDuration) - return &r -} - -// Wait is shorthand for WaitN(ctx, 1). -func (lim *Limiter) Wait(ctx context.Context) (err error) { - return lim.WaitN(ctx, 1) -} - -// WaitN blocks until lim permits n events to happen. -// It returns an error if n exceeds the Limiter's burst size, the Context is -// canceled, or the expected wait time exceeds the Context's Deadline. -func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { - if n > lim.burst { - return fmt.Errorf("rate: Wait(n=%d) exceeds limiter's burst %d", n, lim.burst) - } - // Check if ctx is already cancelled - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - // Determine wait limit - now := time.Now() - waitLimit := InfDuration - if deadline, ok := ctx.Deadline(); ok { - waitLimit = deadline.Sub(now) - } - // Reserve - r := lim.reserveN(now, n, waitLimit) - if !r.ok { - return fmt.Errorf("rate: Wait(n=%d) would exceed context deadline", n) - } - // Wait - t := time.NewTimer(r.DelayFrom(now)) - defer t.Stop() - select { - case <-t.C: - // We can proceed. - return nil - case <-ctx.Done(): - // Context was canceled before we could proceed. Cancel the - // reservation, which may permit other events to proceed sooner. - r.Cancel() - return ctx.Err() - } -} - -// SetLimit is shorthand for SetLimitAt(time.Now(), newLimit). -func (lim *Limiter) SetLimit(newLimit Limit) { - lim.SetLimitAt(time.Now(), newLimit) -} - -// SetLimitAt sets a new Limit for the limiter. The new Limit, and Burst, may be violated -// or underutilized by those which reserved (using Reserve or Wait) but did not yet act -// before SetLimitAt was called. -func (lim *Limiter) SetLimitAt(now time.Time, newLimit Limit) { - lim.mu.Lock() - defer lim.mu.Unlock() - - now, _, tokens := lim.advance(now) - - lim.last = now - lim.tokens = tokens - lim.limit = newLimit -} - -// reserveN is a helper method for AllowN, ReserveN, and WaitN. -// maxFutureReserve specifies the maximum reservation wait duration allowed. -// reserveN returns Reservation, not *Reservation, to avoid allocation in AllowN and WaitN. -func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duration) Reservation { - lim.mu.Lock() - defer lim.mu.Unlock() - - if lim.limit == Inf { - return Reservation{ - ok: true, - lim: lim, - tokens: n, - timeToAct: now, - } - } - - now, last, tokens := lim.advance(now) - - // Calculate the remaining number of tokens resulting from the request. 
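Taken together, `Reserve`, `Wait`, and `Allow` are three views of the same token bucket: `Wait` blocks, `Reserve` hands back a delay, and `Allow` drops. A short usage sketch against the public API shown here, written with the standard-library `context` package (the vendored copy of this era imported `golang.org/x/net/context` instead) and the package's usual import path `golang.org/x/time/rate`:

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/time/rate"
)

func main() {
	// Average 10 events per second, with bursts of up to 5.
	lim := rate.NewLimiter(rate.Limit(10), 5)

	for i := 0; i < 20; i++ {
		// Wait blocks until the bucket has a token, or fails if the
		// context is canceled or its deadline would be exceeded.
		if err := lim.Wait(context.Background()); err != nil {
			fmt.Println("rate:", err)
			return
		}
		fmt.Println("request", i)
	}
}
```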
- tokens -= float64(n) - - // Calculate the wait duration - var waitDuration time.Duration - if tokens < 0 { - waitDuration = lim.limit.durationFromTokens(-tokens) - } - - // Decide result - ok := n <= lim.burst && waitDuration <= maxFutureReserve - - // Prepare reservation - r := Reservation{ - ok: ok, - lim: lim, - limit: lim.limit, - } - if ok { - r.tokens = n - r.timeToAct = now.Add(waitDuration) - } - - // Update state - if ok { - lim.last = now - lim.tokens = tokens - lim.lastEvent = r.timeToAct - } else { - lim.last = last - } - - return r -} - -// advance calculates and returns an updated state for lim resulting from the passage of time. -// lim is not changed. -func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time, newTokens float64) { - last := lim.last - if now.Before(last) { - last = now - } - - // Avoid making delta overflow below when last is very old. - maxElapsed := lim.limit.durationFromTokens(float64(lim.burst) - lim.tokens) - elapsed := now.Sub(last) - if elapsed > maxElapsed { - elapsed = maxElapsed - } - - // Calculate the new number of tokens, due to time that passed. - delta := lim.limit.tokensFromDuration(elapsed) - tokens := lim.tokens + delta - if burst := float64(lim.burst); tokens > burst { - tokens = burst - } - - return now, last, tokens -} - -// durationFromTokens is a unit conversion function from the number of tokens to the duration -// of time it takes to accumulate them at a rate of limit tokens per second. -func (limit Limit) durationFromTokens(tokens float64) time.Duration { - seconds := tokens / float64(limit) - return time.Nanosecond * time.Duration(1e9*seconds) -} - -// tokensFromDuration is a unit conversion function from a time duration to the number of tokens -// which could be accumulated during that duration at a rate of limit tokens per second. -func (limit Limit) tokensFromDuration(d time.Duration) float64 { - return d.Seconds() * float64(limit) -} diff --git a/vendor/gopkg.in/square/go-jose.v1/LICENSE b/vendor/gopkg.in/square/go-jose.v1/LICENSE deleted file mode 100644 index d6456956..00000000 --- a/vendor/gopkg.in/square/go-jose.v1/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/gopkg.in/square/go-jose.v1/README.md b/vendor/gopkg.in/square/go-jose.v1/README.md deleted file mode 100644 index fd859da7..00000000 --- a/vendor/gopkg.in/square/go-jose.v1/README.md +++ /dev/null @@ -1,209 +0,0 @@ -# Go JOSE - -[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v1) [![license](http://img.shields.io/badge/license-apache_2.0-red.svg?style=flat)](https://raw.githubusercontent.com/square/go-jose/master/LICENSE) [![build](https://travis-ci.org/square/go-jose.svg?branch=master)](https://travis-ci.org/square/go-jose) [![coverage](https://coveralls.io/repos/github/square/go-jose/badge.svg?branch=master)](https://coveralls.io/r/square/go-jose) - -Package jose aims to provide an implementation of the Javascript Object Signing -and Encryption set of standards. For the moment, it mainly focuses on encryption -and signing based on the JSON Web Encryption and JSON Web Signature standards. 
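The overview below notes that both the compact and the full serialization formats are supported. The difference in one small sketch, assuming a `signer` built as in the signing example further down this README:

```Go
object, err := signer.Sign([]byte("Lorem ipsum dolor sit amet"))
if err != nil {
	panic(err)
}

// Compact form: three base64url segments joined by dots, as used in
// JWTs. Only valid when there is a single signature or recipient.
compact, err := object.CompactSerialize()
if err != nil {
	panic(err)
}

// Full form: a JSON object; supports multiple signatures/recipients.
full := object.FullSerialize()

fmt.Println(compact)
fmt.Println(full)
```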
- -**Disclaimer**: This library contains encryption software that is subject to -the U.S. Export Administration Regulations. You may not export, re-export, -transfer or download this code or any part of it in violation of any United -States law, directive or regulation. In particular this software may not be -exported or re-exported in any form or on any media to Iran, North Sudan, -Syria, Cuba, or North Korea, or to denied persons or entities mentioned on any -US maintained blocked list. - -## Overview - -The implementation follows the -[JSON Web Encryption](http://dx.doi.org/10.17487/RFC7516) -standard (RFC 7516) and -[JSON Web Signature](http://dx.doi.org/10.17487/RFC7515) -standard (RFC 7515). Tables of supported algorithms are shown below. -The library supports both the compact and full serialization formats, and has -optional support for multiple recipients. It also comes with a small -command-line utility -([`jose-util`](https://github.com/square/go-jose/tree/master/jose-util)) -for dealing with JOSE messages in a shell. - -**Note**: We use a forked version of the `encoding/json` package from the Go -standard library which uses case-sensitive matching for member names (instead -of [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html)). -This is to avoid differences in interpretation of messages between go-jose and -libraries in other languages. If you do not like this behavior, you can use the -`std_json` build tag to disable it (though we do not recommend doing so). - -### Versions - -We use [gopkg.in](https://gopkg.in) for versioning. - -[Version 1](https://gopkg.in/square/go-jose.v1) is the current stable version: - - import "gopkg.in/square/go-jose.v1" - -The interface for [go-jose.v1](https://gopkg.in/square/go-jose.v1) will remain -backwards compatible. We're currently sketching out ideas for a new version, to -clean up the interface a bit. If you have ideas or feature requests [please let -us know](https://github.com/square/go-jose/issues/64)! - -### Supported algorithms - -See below for a table of supported algorithms. Algorithm identifiers match -the names in the -[JSON Web Algorithms](http://dx.doi.org/10.17487/RFC7518) -standard where possible. The -[Godoc reference](https://godoc.org/github.com/square/go-jose#pkg-constants) -has a list of constants. - - Key encryption | Algorithm identifier(s) - :------------------------- | :------------------------------ - RSA-PKCS#1v1.5 | RSA1_5 - RSA-OAEP | RSA-OAEP, RSA-OAEP-256 - AES key wrap | A128KW, A192KW, A256KW - AES-GCM key wrap | A128GCMKW, A192GCMKW, A256GCMKW - ECDH-ES + AES key wrap | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW - ECDH-ES (direct) | ECDH-ES1 - Direct encryption | dir1 - -1. Not supported in multi-recipient mode - - Signing / MAC | Algorithm identifier(s) - :------------------------- | :------------------------------ - RSASSA-PKCS#1v1.5 | RS256, RS384, RS512 - RSASSA-PSS | PS256, PS384, PS512 - HMAC | HS256, HS384, HS512 - ECDSA | ES256, ES384, ES512 - - Content encryption | Algorithm identifier(s) - :------------------------- | :------------------------------ - AES-CBC+HMAC | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512 - AES-GCM | A128GCM, A192GCM, A256GCM - - Compression | Algorithm identifiers(s) - :------------------------- | ------------------------------- - DEFLATE (RFC 1951) | DEF - -### Supported key types - -See below for a table of supported key types. 
These are understood by the -library, and can be passed to corresponding functions such as `NewEncrypter` or -`NewSigner`. Note that if you are creating a new encrypter or signer with a -JsonWebKey, the key id of the JsonWebKey (if present) will be added to any -resulting messages. - - Algorithm(s) | Corresponding types - :------------------------- | ------------------------------- - RSA | *[rsa.PublicKey](http://golang.org/pkg/crypto/rsa/#PublicKey), *[rsa.PrivateKey](http://golang.org/pkg/crypto/rsa/#PrivateKey), *[jose.JsonWebKey](https://godoc.org/github.com/square/go-jose#JsonWebKey) - ECDH, ECDSA | *[ecdsa.PublicKey](http://golang.org/pkg/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](http://golang.org/pkg/crypto/ecdsa/#PrivateKey), *[jose.JsonWebKey](https://godoc.org/github.com/square/go-jose#JsonWebKey) - AES, HMAC | []byte, *[jose.JsonWebKey](https://godoc.org/github.com/square/go-jose#JsonWebKey) - -## Examples - -Encryption/decryption example using RSA: - -```Go -// Generate a public/private key pair to use for this example. The library -// also provides two utility functions (LoadPublicKey and LoadPrivateKey) -// that can be used to load keys from PEM/DER-encoded data. -privateKey, err := rsa.GenerateKey(rand.Reader, 2048) -if err != nil { - panic(err) -} - -// Instantiate an encrypter using RSA-OAEP with AES128-GCM. An error would -// indicate that the selected algorithm(s) are not currently supported. -publicKey := &privateKey.PublicKey -encrypter, err := NewEncrypter(RSA_OAEP, A128GCM, publicKey) -if err != nil { - panic(err) -} - -// Encrypt a sample plaintext. Calling the encrypter returns an encrypted -// JWE object, which can then be serialized for output afterwards. An error -// would indicate a problem in an underlying cryptographic primitive. -var plaintext = []byte("Lorem ipsum dolor sit amet") -object, err := encrypter.Encrypt(plaintext) -if err != nil { - panic(err) -} - -// Serialize the encrypted object using the full serialization format. -// Alternatively you can also use the compact format here by calling -// object.CompactSerialize() instead. -serialized := object.FullSerialize() - -// Parse the serialized, encrypted JWE object. An error would indicate that -// the given input did not represent a valid message. -object, err = ParseEncrypted(serialized) -if err != nil { - panic(err) -} - -// Now we can decrypt and get back our original plaintext. An error here -// would indicate the the message failed to decrypt, e.g. because the auth -// tag was broken or the message was tampered with. -decrypted, err := object.Decrypt(privateKey) -if err != nil { - panic(err) -} - -fmt.Printf(string(decrypted)) -// output: Lorem ipsum dolor sit amet -``` - -Signing/verification example using RSA: - -```Go -// Generate a public/private key pair to use for this example. The library -// also provides two utility functions (LoadPublicKey and LoadPrivateKey) -// that can be used to load keys from PEM/DER-encoded data. -privateKey, err := rsa.GenerateKey(rand.Reader, 2048) -if err != nil { - panic(err) -} - -// Instantiate a signer using RSASSA-PSS (SHA512) with the given private key. -signer, err := NewSigner(PS512, privateKey) -if err != nil { - panic(err) -} - -// Sign a sample payload. Calling the signer returns a protected JWS object, -// which can then be serialized for output afterwards. An error would -// indicate a problem in an underlying cryptographic primitive. 
-var payload = []byte("Lorem ipsum dolor sit amet") -object, err := signer.Sign(payload) -if err != nil { - panic(err) -} - -// Serialize the encrypted object using the full serialization format. -// Alternatively you can also use the compact format here by calling -// object.CompactSerialize() instead. -serialized := object.FullSerialize() - -// Parse the serialized, protected JWS object. An error would indicate that -// the given input did not represent a valid message. -object, err = ParseSigned(serialized) -if err != nil { - panic(err) -} - -// Now we can verify the signature on the payload. An error here would -// indicate the the message failed to verify, e.g. because the signature was -// broken or the message was tampered with. -output, err := object.Verify(&privateKey.PublicKey) -if err != nil { - panic(err) -} - -fmt.Printf(string(output)) -// output: Lorem ipsum dolor sit amet -``` - -More examples can be found in the [Godoc -reference](https://godoc.org/github.com/square/go-jose) for this package. The -[`jose-util`](https://github.com/square/go-jose/tree/master/jose-util) -subdirectory also contains a small command-line utility which might -be useful as an example. diff --git a/vendor/gopkg.in/square/go-jose.v1/asymmetric.go b/vendor/gopkg.in/square/go-jose.v1/asymmetric.go deleted file mode 100644 index 381156ca..00000000 --- a/vendor/gopkg.in/square/go-jose.v1/asymmetric.go +++ /dev/null @@ -1,498 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "crypto" - "crypto/aes" - "crypto/ecdsa" - "crypto/rand" - "crypto/rsa" - "crypto/sha1" - "crypto/sha256" - "errors" - "fmt" - "math/big" - - "gopkg.in/square/go-jose.v1/cipher" -) - -// A generic RSA-based encrypter/verifier -type rsaEncrypterVerifier struct { - publicKey *rsa.PublicKey -} - -// A generic RSA-based decrypter/signer -type rsaDecrypterSigner struct { - privateKey *rsa.PrivateKey -} - -// A generic EC-based encrypter/verifier -type ecEncrypterVerifier struct { - publicKey *ecdsa.PublicKey -} - -// A key generator for ECDH-ES -type ecKeyGenerator struct { - size int - algID string - publicKey *ecdsa.PublicKey -} - -// A generic EC-based decrypter/signer -type ecDecrypterSigner struct { - privateKey *ecdsa.PrivateKey -} - -// newRSARecipient creates recipientKeyInfo based on the given key. -func newRSARecipient(keyAlg KeyAlgorithm, publicKey *rsa.PublicKey) (recipientKeyInfo, error) { - // Verify that key management algorithm is supported by this encrypter - switch keyAlg { - case RSA1_5, RSA_OAEP, RSA_OAEP_256: - default: - return recipientKeyInfo{}, ErrUnsupportedAlgorithm - } - - return recipientKeyInfo{ - keyAlg: keyAlg, - keyEncrypter: &rsaEncrypterVerifier{ - publicKey: publicKey, - }, - }, nil -} - -// newRSASigner creates a recipientSigInfo based on the given key. 
-func newRSASigner(sigAlg SignatureAlgorithm, privateKey *rsa.PrivateKey) (recipientSigInfo, error) { - // Verify that key management algorithm is supported by this encrypter - switch sigAlg { - case RS256, RS384, RS512, PS256, PS384, PS512: - default: - return recipientSigInfo{}, ErrUnsupportedAlgorithm - } - - return recipientSigInfo{ - sigAlg: sigAlg, - publicKey: &JsonWebKey{ - Key: &privateKey.PublicKey, - }, - signer: &rsaDecrypterSigner{ - privateKey: privateKey, - }, - }, nil -} - -// newECDHRecipient creates recipientKeyInfo based on the given key. -func newECDHRecipient(keyAlg KeyAlgorithm, publicKey *ecdsa.PublicKey) (recipientKeyInfo, error) { - // Verify that key management algorithm is supported by this encrypter - switch keyAlg { - case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: - default: - return recipientKeyInfo{}, ErrUnsupportedAlgorithm - } - - return recipientKeyInfo{ - keyAlg: keyAlg, - keyEncrypter: &ecEncrypterVerifier{ - publicKey: publicKey, - }, - }, nil -} - -// newECDSASigner creates a recipientSigInfo based on the given key. -func newECDSASigner(sigAlg SignatureAlgorithm, privateKey *ecdsa.PrivateKey) (recipientSigInfo, error) { - // Verify that key management algorithm is supported by this encrypter - switch sigAlg { - case ES256, ES384, ES512: - default: - return recipientSigInfo{}, ErrUnsupportedAlgorithm - } - - return recipientSigInfo{ - sigAlg: sigAlg, - publicKey: &JsonWebKey{ - Key: &privateKey.PublicKey, - }, - signer: &ecDecrypterSigner{ - privateKey: privateKey, - }, - }, nil -} - -// Encrypt the given payload and update the object. -func (ctx rsaEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) { - encryptedKey, err := ctx.encrypt(cek, alg) - if err != nil { - return recipientInfo{}, err - } - - return recipientInfo{ - encryptedKey: encryptedKey, - header: &rawHeader{}, - }, nil -} - -// Encrypt the given payload. Based on the key encryption algorithm, -// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256). -func (ctx rsaEncrypterVerifier) encrypt(cek []byte, alg KeyAlgorithm) ([]byte, error) { - switch alg { - case RSA1_5: - return rsa.EncryptPKCS1v15(randReader, ctx.publicKey, cek) - case RSA_OAEP: - return rsa.EncryptOAEP(sha1.New(), randReader, ctx.publicKey, cek, []byte{}) - case RSA_OAEP_256: - return rsa.EncryptOAEP(sha256.New(), randReader, ctx.publicKey, cek, []byte{}) - } - - return nil, ErrUnsupportedAlgorithm -} - -// Decrypt the given payload and return the content encryption key. -func (ctx rsaDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) { - return ctx.decrypt(recipient.encryptedKey, KeyAlgorithm(headers.Alg), generator) -} - -// Decrypt the given payload. Based on the key encryption algorithm, -// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256). -func (ctx rsaDecrypterSigner) decrypt(jek []byte, alg KeyAlgorithm, generator keyGenerator) ([]byte, error) { - // Note: The random reader on decrypt operations is only used for blinding, - // so stubbing is meanlingless (hence the direct use of rand.Reader). - switch alg { - case RSA1_5: - defer func() { - // DecryptPKCS1v15SessionKey sometimes panics on an invalid payload - // because of an index out of bounds error, which we want to ignore. - // This has been fixed in Go 1.3.1 (released 2014/08/13), the recover() - // only exists for preventing crashes with unpatched versions. 
- // See: https://groups.google.com/forum/#!topic/golang-dev/7ihX6Y6kx9k - // See: https://code.google.com/p/go/source/detail?r=58ee390ff31602edb66af41ed10901ec95904d33 - _ = recover() - }() - - // Perform some input validation. - keyBytes := ctx.privateKey.PublicKey.N.BitLen() / 8 - if keyBytes != len(jek) { - // Input size is incorrect, the encrypted payload should always match - // the size of the public modulus (e.g. using a 2048 bit key will - // produce 256 bytes of output). Reject this since it's invalid input. - return nil, ErrCryptoFailure - } - - cek, _, err := generator.genKey() - if err != nil { - return nil, ErrCryptoFailure - } - - // When decrypting an RSA-PKCS1v1.5 payload, we must take precautions to - // prevent chosen-ciphertext attacks as described in RFC 3218, "Preventing - // the Million Message Attack on Cryptographic Message Syntax". We are - // therefore deliberatly ignoring errors here. - _ = rsa.DecryptPKCS1v15SessionKey(rand.Reader, ctx.privateKey, jek, cek) - - return cek, nil - case RSA_OAEP: - // Use rand.Reader for RSA blinding - return rsa.DecryptOAEP(sha1.New(), rand.Reader, ctx.privateKey, jek, []byte{}) - case RSA_OAEP_256: - // Use rand.Reader for RSA blinding - return rsa.DecryptOAEP(sha256.New(), rand.Reader, ctx.privateKey, jek, []byte{}) - } - - return nil, ErrUnsupportedAlgorithm -} - -// Sign the given payload -func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { - var hash crypto.Hash - - switch alg { - case RS256, PS256: - hash = crypto.SHA256 - case RS384, PS384: - hash = crypto.SHA384 - case RS512, PS512: - hash = crypto.SHA512 - default: - return Signature{}, ErrUnsupportedAlgorithm - } - - hasher := hash.New() - - // According to documentation, Write() on hash never fails - _, _ = hasher.Write(payload) - hashed := hasher.Sum(nil) - - var out []byte - var err error - - switch alg { - case RS256, RS384, RS512: - out, err = rsa.SignPKCS1v15(randReader, ctx.privateKey, hash, hashed) - case PS256, PS384, PS512: - out, err = rsa.SignPSS(randReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthAuto, - }) - } - - if err != nil { - return Signature{}, err - } - - return Signature{ - Signature: out, - protected: &rawHeader{}, - }, nil -} - -// Verify the given payload -func (ctx rsaEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { - var hash crypto.Hash - - switch alg { - case RS256, PS256: - hash = crypto.SHA256 - case RS384, PS384: - hash = crypto.SHA384 - case RS512, PS512: - hash = crypto.SHA512 - default: - return ErrUnsupportedAlgorithm - } - - hasher := hash.New() - - // According to documentation, Write() on hash never fails - _, _ = hasher.Write(payload) - hashed := hasher.Sum(nil) - - switch alg { - case RS256, RS384, RS512: - return rsa.VerifyPKCS1v15(ctx.publicKey, hash, hashed, signature) - case PS256, PS384, PS512: - return rsa.VerifyPSS(ctx.publicKey, hash, hashed, signature, nil) - } - - return ErrUnsupportedAlgorithm -} - -// Encrypt the given payload and update the object. -func (ctx ecEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) { - switch alg { - case ECDH_ES: - // ECDH-ES mode doesn't wrap a key, the shared secret is used directly as the key. 
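The RSA1_5 decrypt path above is worth a second look: it generates a random content-encryption key first, then lets `DecryptPKCS1v15SessionKey` overwrite it only if the PKCS#1 v1.5 padding checks out, deliberately discarding the error. Valid and invalid ciphertexts therefore follow the same code path, which is the RFC 3218 defence against Bleichenbacher's million-message attack. A condensed sketch of just that pattern; the 32-byte CEK length is illustrative:

```go
package josesketch

import (
	"crypto/rand"
	"crypto/rsa"
)

// decryptCEK recovers a content-encryption key from an RSA-PKCS1v1.5
// encrypted blob without exposing a padding oracle: on padding failure
// cek silently keeps its random value, and decryption then fails later
// at the authentication-tag check instead of here.
func decryptCEK(priv *rsa.PrivateKey, encryptedKey []byte) ([]byte, error) {
	cek := make([]byte, 32) // expected CEK size for the content cipher
	if _, err := rand.Read(cek); err != nil {
		return nil, err
	}
	_ = rsa.DecryptPKCS1v15SessionKey(rand.Reader, priv, encryptedKey, cek)
	return cek, nil
}
```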
- return recipientInfo{ - header: &rawHeader{}, - }, nil - case ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: - default: - return recipientInfo{}, ErrUnsupportedAlgorithm - } - - generator := ecKeyGenerator{ - algID: string(alg), - publicKey: ctx.publicKey, - } - - switch alg { - case ECDH_ES_A128KW: - generator.size = 16 - case ECDH_ES_A192KW: - generator.size = 24 - case ECDH_ES_A256KW: - generator.size = 32 - } - - kek, header, err := generator.genKey() - if err != nil { - return recipientInfo{}, err - } - - block, err := aes.NewCipher(kek) - if err != nil { - return recipientInfo{}, err - } - - jek, err := josecipher.KeyWrap(block, cek) - if err != nil { - return recipientInfo{}, err - } - - return recipientInfo{ - encryptedKey: jek, - header: &header, - }, nil -} - -// Get key size for EC key generator -func (ctx ecKeyGenerator) keySize() int { - return ctx.size -} - -// Get a content encryption key for ECDH-ES -func (ctx ecKeyGenerator) genKey() ([]byte, rawHeader, error) { - priv, err := ecdsa.GenerateKey(ctx.publicKey.Curve, randReader) - if err != nil { - return nil, rawHeader{}, err - } - - out := josecipher.DeriveECDHES(ctx.algID, []byte{}, []byte{}, priv, ctx.publicKey, ctx.size) - - headers := rawHeader{ - Epk: &JsonWebKey{ - Key: &priv.PublicKey, - }, - } - - return out, headers, nil -} - -// Decrypt the given payload and return the content encryption key. -func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) { - if headers.Epk == nil { - return nil, errors.New("square/go-jose: missing epk header") - } - - publicKey, ok := headers.Epk.Key.(*ecdsa.PublicKey) - if publicKey == nil || !ok { - return nil, errors.New("square/go-jose: invalid epk header") - } - - apuData := headers.Apu.bytes() - apvData := headers.Apv.bytes() - - deriveKey := func(algID string, size int) []byte { - return josecipher.DeriveECDHES(algID, apuData, apvData, ctx.privateKey, publicKey, size) - } - - var keySize int - - switch KeyAlgorithm(headers.Alg) { - case ECDH_ES: - // ECDH-ES uses direct key agreement, no key unwrapping necessary. 
-		return deriveKey(string(headers.Enc), generator.keySize()), nil
-	case ECDH_ES_A128KW:
-		keySize = 16
-	case ECDH_ES_A192KW:
-		keySize = 24
-	case ECDH_ES_A256KW:
-		keySize = 32
-	default:
-		return nil, ErrUnsupportedAlgorithm
-	}
-
-	key := deriveKey(headers.Alg, keySize)
-	block, err := aes.NewCipher(key)
-	if err != nil {
-		return nil, err
-	}
-
-	return josecipher.KeyUnwrap(block, recipient.encryptedKey)
-}
-
-// Sign the given payload
-func (ctx ecDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
-	var expectedBitSize int
-	var hash crypto.Hash
-
-	switch alg {
-	case ES256:
-		expectedBitSize = 256
-		hash = crypto.SHA256
-	case ES384:
-		expectedBitSize = 384
-		hash = crypto.SHA384
-	case ES512:
-		expectedBitSize = 521
-		hash = crypto.SHA512
-	}
-
-	curveBits := ctx.privateKey.Curve.Params().BitSize
-	if expectedBitSize != curveBits {
-		return Signature{}, fmt.Errorf("square/go-jose: expected %d bit key, got %d bits instead", expectedBitSize, curveBits)
-	}
-
-	hasher := hash.New()
-
-	// According to documentation, Write() on hash never fails
-	_, _ = hasher.Write(payload)
-	hashed := hasher.Sum(nil)
-
-	r, s, err := ecdsa.Sign(randReader, ctx.privateKey, hashed)
-	if err != nil {
-		return Signature{}, err
-	}
-
-	keyBytes := curveBits / 8
-	if curveBits%8 > 0 {
-		keyBytes += 1
-	}
-
-	// We serialize the outputs (r and s) into big-endian byte arrays and pad
-	// them with zeros on the left to make sure the sizes work out. Both arrays
-	// must be keyBytes long, and the output must be 2*keyBytes long.
-	rBytes := r.Bytes()
-	rBytesPadded := make([]byte, keyBytes)
-	copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)
-
-	sBytes := s.Bytes()
-	sBytesPadded := make([]byte, keyBytes)
-	copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)
-
-	out := append(rBytesPadded, sBytesPadded...)
-
-	return Signature{
-		Signature: out,
-		protected: &rawHeader{},
-	}, nil
-}
-
-// Verify the given payload
-func (ctx ecEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
-	var keySize int
-	var hash crypto.Hash
-
-	switch alg {
-	case ES256:
-		keySize = 32
-		hash = crypto.SHA256
-	case ES384:
-		keySize = 48
-		hash = crypto.SHA384
-	case ES512:
-		keySize = 66
-		hash = crypto.SHA512
-	}
-
-	if len(signature) != 2*keySize {
-		return fmt.Errorf("square/go-jose: invalid signature size, have %d bytes, wanted %d", len(signature), 2*keySize)
-	}
-
-	hasher := hash.New()
-
-	// According to documentation, Write() on hash never fails
-	_, _ = hasher.Write(payload)
-	hashed := hasher.Sum(nil)
-
-	r := big.NewInt(0).SetBytes(signature[:keySize])
-	s := big.NewInt(0).SetBytes(signature[keySize:])
-
-	match := ecdsa.Verify(ctx.publicKey, hashed, r, s)
-	if !match {
-		return errors.New("square/go-jose: ecdsa signature failed to verify")
-	}
-
-	return nil
-}
diff --git a/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac.go b/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac.go
deleted file mode 100644
index a5c35834..00000000
--- a/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac.go
+++ /dev/null
@@ -1,196 +0,0 @@
-/*-
- * Copyright 2014 Square Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "bytes" - "crypto/cipher" - "crypto/hmac" - "crypto/sha256" - "crypto/sha512" - "crypto/subtle" - "encoding/binary" - "errors" - "hash" -) - -const ( - nonceBytes = 16 -) - -// NewCBCHMAC instantiates a new AEAD based on CBC+HMAC. -func NewCBCHMAC(key []byte, newBlockCipher func([]byte) (cipher.Block, error)) (cipher.AEAD, error) { - keySize := len(key) / 2 - integrityKey := key[:keySize] - encryptionKey := key[keySize:] - - blockCipher, err := newBlockCipher(encryptionKey) - if err != nil { - return nil, err - } - - var hash func() hash.Hash - switch keySize { - case 16: - hash = sha256.New - case 24: - hash = sha512.New384 - case 32: - hash = sha512.New - } - - return &cbcAEAD{ - hash: hash, - blockCipher: blockCipher, - authtagBytes: keySize, - integrityKey: integrityKey, - }, nil -} - -// An AEAD based on CBC+HMAC -type cbcAEAD struct { - hash func() hash.Hash - authtagBytes int - integrityKey []byte - blockCipher cipher.Block -} - -func (ctx *cbcAEAD) NonceSize() int { - return nonceBytes -} - -func (ctx *cbcAEAD) Overhead() int { - // Maximum overhead is block size (for padding) plus auth tag length, where - // the length of the auth tag is equivalent to the key size. - return ctx.blockCipher.BlockSize() + ctx.authtagBytes -} - -// Seal encrypts and authenticates the plaintext. -func (ctx *cbcAEAD) Seal(dst, nonce, plaintext, data []byte) []byte { - // Output buffer -- must take care not to mangle plaintext input. - ciphertext := make([]byte, len(plaintext)+ctx.Overhead())[:len(plaintext)] - copy(ciphertext, plaintext) - ciphertext = padBuffer(ciphertext, ctx.blockCipher.BlockSize()) - - cbc := cipher.NewCBCEncrypter(ctx.blockCipher, nonce) - - cbc.CryptBlocks(ciphertext, ciphertext) - authtag := ctx.computeAuthTag(data, nonce, ciphertext) - - ret, out := resize(dst, len(dst)+len(ciphertext)+len(authtag)) - copy(out, ciphertext) - copy(out[len(ciphertext):], authtag) - - return ret -} - -// Open decrypts and authenticates the ciphertext. -func (ctx *cbcAEAD) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) { - if len(ciphertext) < ctx.authtagBytes { - return nil, errors.New("square/go-jose: invalid ciphertext (too short)") - } - - offset := len(ciphertext) - ctx.authtagBytes - expectedTag := ctx.computeAuthTag(data, nonce, ciphertext[:offset]) - match := subtle.ConstantTimeCompare(expectedTag, ciphertext[offset:]) - if match != 1 { - return nil, errors.New("square/go-jose: invalid ciphertext (auth tag mismatch)") - } - - cbc := cipher.NewCBCDecrypter(ctx.blockCipher, nonce) - - // Make copy of ciphertext buffer, don't want to modify in place - buffer := append([]byte{}, []byte(ciphertext[:offset])...) 
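Open above is strict encrypt-then-MAC: it recomputes the tag over the associated data, nonce, and ciphertext and compares in constant time before a single block is decrypted. A functionally equivalent sketch of the tag construction used by the computeAuthTag helper just below (streaming writes instead of one assembled buffer; the name tagSketch is illustrative):

```go
// tagSketch computes HMAC(integrityKey, AAD || nonce || ciphertext || AL),
// where AL is the big-endian 64-bit *bit* length of the AAD, and truncates
// the result to half the combined key size, as RFC 7518 section 5.2 requires.
func tagSketch(h func() hash.Hash, integrityKey, aad, nonce, ciphertext []byte, tagLen int) []byte {
	mac := hmac.New(h, integrityKey)
	mac.Write(aad)
	mac.Write(nonce)
	mac.Write(ciphertext)
	var al [8]byte
	binary.BigEndian.PutUint64(al[:], uint64(len(aad))*8)
	mac.Write(al[:])
	return mac.Sum(nil)[:tagLen]
}
```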
-
-	if len(buffer)%ctx.blockCipher.BlockSize() > 0 {
-		return nil, errors.New("square/go-jose: invalid ciphertext (invalid length)")
-	}
-
-	cbc.CryptBlocks(buffer, buffer)
-
-	// Remove padding
-	plaintext, err := unpadBuffer(buffer, ctx.blockCipher.BlockSize())
-	if err != nil {
-		return nil, err
-	}
-
-	ret, out := resize(dst, len(dst)+len(plaintext))
-	copy(out, plaintext)
-
-	return ret, nil
-}
-
-// Compute an authentication tag
-func (ctx *cbcAEAD) computeAuthTag(aad, nonce, ciphertext []byte) []byte {
-	buffer := make([]byte, len(aad)+len(nonce)+len(ciphertext)+8)
-	n := 0
-	n += copy(buffer, aad)
-	n += copy(buffer[n:], nonce)
-	n += copy(buffer[n:], ciphertext)
-	binary.BigEndian.PutUint64(buffer[n:], uint64(len(aad)*8))
-
-	// According to documentation, Write() on hash.Hash never fails.
-	hmac := hmac.New(ctx.hash, ctx.integrityKey)
-	_, _ = hmac.Write(buffer)
-
-	return hmac.Sum(nil)[:ctx.authtagBytes]
-}
-
-// resize ensures that the given slice has a capacity of at least n bytes.
-// If the capacity of the slice is less than n, a new slice is allocated
-// and the existing data will be copied.
-func resize(in []byte, n int) (head, tail []byte) {
-	if cap(in) >= n {
-		head = in[:n]
-	} else {
-		head = make([]byte, n)
-		copy(head, in)
-	}
-
-	tail = head[len(in):]
-	return
-}
-
-// Apply padding
-func padBuffer(buffer []byte, blockSize int) []byte {
-	missing := blockSize - (len(buffer) % blockSize)
-	ret, out := resize(buffer, len(buffer)+missing)
-	padding := bytes.Repeat([]byte{byte(missing)}, missing)
-	copy(out, padding)
-	return ret
-}
-
-// Remove padding
-func unpadBuffer(buffer []byte, blockSize int) ([]byte, error) {
-	if len(buffer)%blockSize != 0 {
-		return nil, errors.New("square/go-jose: invalid padding")
-	}
-
-	last := buffer[len(buffer)-1]
-	count := int(last)
-
-	if count == 0 || count > blockSize || count > len(buffer) {
-		return nil, errors.New("square/go-jose: invalid padding")
-	}
-
-	padding := bytes.Repeat([]byte{last}, count)
-	if !bytes.HasSuffix(buffer, padding) {
-		return nil, errors.New("square/go-jose: invalid padding")
-	}
-
-	return buffer[:len(buffer)-count], nil
}
diff --git a/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf.go b/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf.go
deleted file mode 100644
index cbb5f7b8..00000000
--- a/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf.go
+++ /dev/null
@@ -1,75 +0,0 @@
-/*-
- * Copyright 2014 Square Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package josecipher
-
-import (
-	"crypto"
-	"encoding/binary"
-	"hash"
-	"io"
-)
-
-type concatKDF struct {
-	z, info []byte
-	i       uint32
-	cache   []byte
-	hasher  hash.Hash
-}
-
-// NewConcatKDF builds a KDF reader based on the given inputs.
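The reader below streams NIST SP 800-56A Concat KDF output: each round hashes a big-endian 32-bit counter, the shared secret z, and the assembled otherInfo, and the digests are concatenated until enough key material has been produced. A compact, non-streaming equivalent (a sketch, not this package's API; the hash implementation must be linked in, e.g. via a blank import of crypto/sha256):

```go
// concatKDF derives keyLen bytes as H(counter || z || info) for
// counter = 1, 2, ..., exactly what the Read loop below does incrementally.
func concatKDF(h crypto.Hash, z, info []byte, keyLen int) []byte {
	var out []byte
	for counter := uint32(1); len(out) < keyLen; counter++ {
		hasher := h.New()
		_ = binary.Write(hasher, binary.BigEndian, counter) // fixed-size value, cannot fail
		hasher.Write(z)
		hasher.Write(info)
		out = append(out, hasher.Sum(nil)...)
	}
	return out[:keyLen]
}
```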
-func NewConcatKDF(hash crypto.Hash, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo []byte) io.Reader { - buffer := make([]byte, len(algID)+len(ptyUInfo)+len(ptyVInfo)+len(supPubInfo)+len(supPrivInfo)) - n := 0 - n += copy(buffer, algID) - n += copy(buffer[n:], ptyUInfo) - n += copy(buffer[n:], ptyVInfo) - n += copy(buffer[n:], supPubInfo) - copy(buffer[n:], supPrivInfo) - - hasher := hash.New() - - return &concatKDF{ - z: z, - info: buffer, - hasher: hasher, - cache: []byte{}, - i: 1, - } -} - -func (ctx *concatKDF) Read(out []byte) (int, error) { - copied := copy(out, ctx.cache) - ctx.cache = ctx.cache[copied:] - - for copied < len(out) { - ctx.hasher.Reset() - - // Write on a hash.Hash never fails - _ = binary.Write(ctx.hasher, binary.BigEndian, ctx.i) - _, _ = ctx.hasher.Write(ctx.z) - _, _ = ctx.hasher.Write(ctx.info) - - hash := ctx.hasher.Sum(nil) - chunkCopied := copy(out[copied:], hash) - copied += chunkCopied - ctx.cache = hash[chunkCopied:] - - ctx.i++ - } - - return copied, nil -} diff --git a/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es.go b/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es.go deleted file mode 100644 index c6a5a821..00000000 --- a/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es.go +++ /dev/null @@ -1,51 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "crypto" - "crypto/ecdsa" - "encoding/binary" -) - -// DeriveECDHES derives a shared encryption key using ECDH/ConcatKDF as described in JWE/JWA. -func DeriveECDHES(alg string, apuData, apvData []byte, priv *ecdsa.PrivateKey, pub *ecdsa.PublicKey, size int) []byte { - // algId, partyUInfo, partyVInfo inputs must be prefixed with the length - algID := lengthPrefixed([]byte(alg)) - ptyUInfo := lengthPrefixed(apuData) - ptyVInfo := lengthPrefixed(apvData) - - // suppPubInfo is the encoded length of the output size in bits - supPubInfo := make([]byte, 4) - binary.BigEndian.PutUint32(supPubInfo, uint32(size)*8) - - z, _ := priv.PublicKey.Curve.ScalarMult(pub.X, pub.Y, priv.D.Bytes()) - reader := NewConcatKDF(crypto.SHA256, z.Bytes(), algID, ptyUInfo, ptyVInfo, supPubInfo, []byte{}) - - key := make([]byte, size) - - // Read on the KDF will never fail - _, _ = reader.Read(key) - return key -} - -func lengthPrefixed(data []byte) []byte { - out := make([]byte, len(data)+4) - binary.BigEndian.PutUint32(out, uint32(len(data))) - copy(out[4:], data) - return out -} diff --git a/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap.go b/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap.go deleted file mode 100644 index 1d36d501..00000000 --- a/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap.go +++ /dev/null @@ -1,109 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "crypto/cipher" - "crypto/subtle" - "encoding/binary" - "errors" -) - -var defaultIV = []byte{0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6} - -// KeyWrap implements NIST key wrapping; it wraps a content encryption key (cek) with the given block cipher. -func KeyWrap(block cipher.Block, cek []byte) ([]byte, error) { - if len(cek)%8 != 0 { - return nil, errors.New("square/go-jose: key wrap input must be 8 byte blocks") - } - - n := len(cek) / 8 - r := make([][]byte, n) - - for i := range r { - r[i] = make([]byte, 8) - copy(r[i], cek[i*8:]) - } - - buffer := make([]byte, 16) - tBytes := make([]byte, 8) - copy(buffer, defaultIV) - - for t := 0; t < 6*n; t++ { - copy(buffer[8:], r[t%n]) - - block.Encrypt(buffer, buffer) - - binary.BigEndian.PutUint64(tBytes, uint64(t+1)) - - for i := 0; i < 8; i++ { - buffer[i] = buffer[i] ^ tBytes[i] - } - copy(r[t%n], buffer[8:]) - } - - out := make([]byte, (n+1)*8) - copy(out, buffer[:8]) - for i := range r { - copy(out[(i+1)*8:], r[i]) - } - - return out, nil -} - -// KeyUnwrap implements NIST key unwrapping; it unwraps a content encryption key (cek) with the given block cipher. -func KeyUnwrap(block cipher.Block, ciphertext []byte) ([]byte, error) { - if len(ciphertext)%8 != 0 { - return nil, errors.New("square/go-jose: key wrap input must be 8 byte blocks") - } - - n := (len(ciphertext) / 8) - 1 - r := make([][]byte, n) - - for i := range r { - r[i] = make([]byte, 8) - copy(r[i], ciphertext[(i+1)*8:]) - } - - buffer := make([]byte, 16) - tBytes := make([]byte, 8) - copy(buffer[:8], ciphertext[:8]) - - for t := 6*n - 1; t >= 0; t-- { - binary.BigEndian.PutUint64(tBytes, uint64(t+1)) - - for i := 0; i < 8; i++ { - buffer[i] = buffer[i] ^ tBytes[i] - } - copy(buffer[8:], r[t%n]) - - block.Decrypt(buffer, buffer) - - copy(r[t%n], buffer[8:]) - } - - if subtle.ConstantTimeCompare(buffer[:8], defaultIV) == 0 { - return nil, errors.New("square/go-jose: failed to unwrap key") - } - - out := make([]byte, n*8) - for i := range r { - copy(out[i*8:], r[i]) - } - - return out, nil -} diff --git a/vendor/gopkg.in/square/go-jose.v1/crypter.go b/vendor/gopkg.in/square/go-jose.v1/crypter.go deleted file mode 100644 index f61af2c0..00000000 --- a/vendor/gopkg.in/square/go-jose.v1/crypter.go +++ /dev/null @@ -1,349 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "crypto/ecdsa" - "crypto/rsa" - "fmt" - "reflect" -) - -// Encrypter represents an encrypter which produces an encrypted JWE object. 
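The Encrypter interface defined just below is the package's main entry point for producing JWE objects. A hedged usage sketch: RSA_OAEP and A128GCM are constants from this package, rsaKey (*rsa.PrivateKey) is an assumed input, and CompactSerialize is defined on JsonWebEncryption elsewhere in the library:

```go
encrypter, err := jose.NewEncrypter(jose.RSA_OAEP, jose.A128GCM, &rsaKey.PublicKey)
if err != nil {
	panic(err)
}
object, err := encrypter.Encrypt([]byte("hello JWE"))
if err != nil {
	panic(err)
}
compact, _ := object.CompactSerialize() // "eyJhbGciOiJSU0EtT0FFUCIs..."
plaintext, _ := object.Decrypt(rsaKey)  // round trip via Decrypt, below
_, _ = compact, plaintext
```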
-type Encrypter interface { - Encrypt(plaintext []byte) (*JsonWebEncryption, error) - EncryptWithAuthData(plaintext []byte, aad []byte) (*JsonWebEncryption, error) - SetCompression(alg CompressionAlgorithm) -} - -// MultiEncrypter represents an encrypter which supports multiple recipients. -type MultiEncrypter interface { - Encrypt(plaintext []byte) (*JsonWebEncryption, error) - EncryptWithAuthData(plaintext []byte, aad []byte) (*JsonWebEncryption, error) - SetCompression(alg CompressionAlgorithm) - AddRecipient(alg KeyAlgorithm, encryptionKey interface{}) error -} - -// A generic content cipher -type contentCipher interface { - keySize() int - encrypt(cek []byte, aad, plaintext []byte) (*aeadParts, error) - decrypt(cek []byte, aad []byte, parts *aeadParts) ([]byte, error) -} - -// A key generator (for generating/getting a CEK) -type keyGenerator interface { - keySize() int - genKey() ([]byte, rawHeader, error) -} - -// A generic key encrypter -type keyEncrypter interface { - encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) // Encrypt a key -} - -// A generic key decrypter -type keyDecrypter interface { - decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) // Decrypt a key -} - -// A generic encrypter based on the given key encrypter and content cipher. -type genericEncrypter struct { - contentAlg ContentEncryption - compressionAlg CompressionAlgorithm - cipher contentCipher - recipients []recipientKeyInfo - keyGenerator keyGenerator -} - -type recipientKeyInfo struct { - keyID string - keyAlg KeyAlgorithm - keyEncrypter keyEncrypter -} - -// SetCompression sets a compression algorithm to be applied before encryption. -func (ctx *genericEncrypter) SetCompression(compressionAlg CompressionAlgorithm) { - ctx.compressionAlg = compressionAlg -} - -// NewEncrypter creates an appropriate encrypter based on the key type -func NewEncrypter(alg KeyAlgorithm, enc ContentEncryption, encryptionKey interface{}) (Encrypter, error) { - encrypter := &genericEncrypter{ - contentAlg: enc, - compressionAlg: NONE, - recipients: []recipientKeyInfo{}, - cipher: getContentCipher(enc), - } - - if encrypter.cipher == nil { - return nil, ErrUnsupportedAlgorithm - } - - var keyID string - var rawKey interface{} - switch encryptionKey := encryptionKey.(type) { - case *JsonWebKey: - keyID = encryptionKey.KeyID - rawKey = encryptionKey.Key - default: - rawKey = encryptionKey - } - - switch alg { - case DIRECT: - // Direct encryption mode must be treated differently - if reflect.TypeOf(rawKey) != reflect.TypeOf([]byte{}) { - return nil, ErrUnsupportedKeyType - } - encrypter.keyGenerator = staticKeyGenerator{ - key: rawKey.([]byte), - } - recipient, _ := newSymmetricRecipient(alg, rawKey.([]byte)) - if keyID != "" { - recipient.keyID = keyID - } - encrypter.recipients = []recipientKeyInfo{recipient} - return encrypter, nil - case ECDH_ES: - // ECDH-ES (w/o key wrapping) is similar to DIRECT mode - typeOf := reflect.TypeOf(rawKey) - if typeOf != reflect.TypeOf(&ecdsa.PublicKey{}) { - return nil, ErrUnsupportedKeyType - } - encrypter.keyGenerator = ecKeyGenerator{ - size: encrypter.cipher.keySize(), - algID: string(enc), - publicKey: rawKey.(*ecdsa.PublicKey), - } - recipient, _ := newECDHRecipient(alg, rawKey.(*ecdsa.PublicKey)) - if keyID != "" { - recipient.keyID = keyID - } - encrypter.recipients = []recipientKeyInfo{recipient} - return encrypter, nil - default: - // Can just add a standard recipient - encrypter.keyGenerator = randomKeyGenerator{ - size: 
encrypter.cipher.keySize(), - } - err := encrypter.AddRecipient(alg, encryptionKey) - return encrypter, err - } -} - -// NewMultiEncrypter creates a multi-encrypter based on the given parameters -func NewMultiEncrypter(enc ContentEncryption) (MultiEncrypter, error) { - cipher := getContentCipher(enc) - - if cipher == nil { - return nil, ErrUnsupportedAlgorithm - } - - encrypter := &genericEncrypter{ - contentAlg: enc, - compressionAlg: NONE, - recipients: []recipientKeyInfo{}, - cipher: cipher, - keyGenerator: randomKeyGenerator{ - size: cipher.keySize(), - }, - } - - return encrypter, nil -} - -func (ctx *genericEncrypter) AddRecipient(alg KeyAlgorithm, encryptionKey interface{}) (err error) { - var recipient recipientKeyInfo - - switch alg { - case DIRECT, ECDH_ES: - return fmt.Errorf("square/go-jose: key algorithm '%s' not supported in multi-recipient mode", alg) - } - - recipient, err = makeJWERecipient(alg, encryptionKey) - - if err == nil { - ctx.recipients = append(ctx.recipients, recipient) - } - return err -} - -func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKeyInfo, error) { - switch encryptionKey := encryptionKey.(type) { - case *rsa.PublicKey: - return newRSARecipient(alg, encryptionKey) - case *ecdsa.PublicKey: - return newECDHRecipient(alg, encryptionKey) - case []byte: - return newSymmetricRecipient(alg, encryptionKey) - case *JsonWebKey: - recipient, err := makeJWERecipient(alg, encryptionKey.Key) - if err == nil && encryptionKey.KeyID != "" { - recipient.keyID = encryptionKey.KeyID - } - return recipient, err - default: - return recipientKeyInfo{}, ErrUnsupportedKeyType - } -} - -// newDecrypter creates an appropriate decrypter based on the key type -func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) { - switch decryptionKey := decryptionKey.(type) { - case *rsa.PrivateKey: - return &rsaDecrypterSigner{ - privateKey: decryptionKey, - }, nil - case *ecdsa.PrivateKey: - return &ecDecrypterSigner{ - privateKey: decryptionKey, - }, nil - case []byte: - return &symmetricKeyCipher{ - key: decryptionKey, - }, nil - case *JsonWebKey: - return newDecrypter(decryptionKey.Key) - default: - return nil, ErrUnsupportedKeyType - } -} - -// Implementation of encrypt method producing a JWE object. -func (ctx *genericEncrypter) Encrypt(plaintext []byte) (*JsonWebEncryption, error) { - return ctx.EncryptWithAuthData(plaintext, nil) -} - -// Implementation of encrypt method producing a JWE object. -func (ctx *genericEncrypter) EncryptWithAuthData(plaintext, aad []byte) (*JsonWebEncryption, error) { - obj := &JsonWebEncryption{} - obj.aad = aad - - obj.protected = &rawHeader{ - Enc: ctx.contentAlg, - } - obj.recipients = make([]recipientInfo, len(ctx.recipients)) - - if len(ctx.recipients) == 0 { - return nil, fmt.Errorf("square/go-jose: no recipients to encrypt to") - } - - cek, headers, err := ctx.keyGenerator.genKey() - if err != nil { - return nil, err - } - - obj.protected.merge(&headers) - - for i, info := range ctx.recipients { - recipient, err := info.keyEncrypter.encryptKey(cek, info.keyAlg) - if err != nil { - return nil, err - } - - recipient.header.Alg = string(info.keyAlg) - if info.keyID != "" { - recipient.header.Kid = info.keyID - } - obj.recipients[i] = recipient - } - - if len(ctx.recipients) == 1 { - // Move per-recipient headers into main protected header if there's - // only a single recipient. 
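Multi-recipient mode encrypts the content once under a single CEK and wraps that CEK separately for each recipient, which is why AddRecipient above rejects DIRECT and ECDH_ES: both bind the CEK to one specific key. A hedged sketch (alicePub and bobKey are assumed inputs; A256KW takes a 32-byte symmetric key):

```go
multi, err := jose.NewMultiEncrypter(jose.A256GCM)
if err != nil {
	panic(err)
}
_ = multi.AddRecipient(jose.RSA_OAEP, alicePub) // alicePub *rsa.PublicKey
_ = multi.AddRecipient(jose.A256KW, bobKey)     // bobKey []byte, 32 bytes
object, err := multi.Encrypt([]byte("to both recipients"))
_, _ = object, err
```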
-		obj.protected.merge(obj.recipients[0].header)
-		obj.recipients[0].header = nil
-	}
-
-	if ctx.compressionAlg != NONE {
-		plaintext, err = compress(ctx.compressionAlg, plaintext)
-		if err != nil {
-			return nil, err
-		}
-
-		obj.protected.Zip = ctx.compressionAlg
-	}
-
-	authData := obj.computeAuthData()
-	parts, err := ctx.cipher.encrypt(cek, authData, plaintext)
-	if err != nil {
-		return nil, err
-	}
-
-	obj.iv = parts.iv
-	obj.ciphertext = parts.ciphertext
-	obj.tag = parts.tag
-
-	return obj, nil
-}
-
-// Decrypt and validate the object and return the plaintext.
-func (obj JsonWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) {
-	headers := obj.mergedHeaders(nil)
-
-	if len(headers.Crit) > 0 {
-		return nil, fmt.Errorf("square/go-jose: unsupported crit header")
-	}
-
-	decrypter, err := newDecrypter(decryptionKey)
-	if err != nil {
-		return nil, err
-	}
-
-	cipher := getContentCipher(headers.Enc)
-	if cipher == nil {
-		return nil, fmt.Errorf("square/go-jose: unsupported enc value '%s'", string(headers.Enc))
-	}
-
-	generator := randomKeyGenerator{
-		size: cipher.keySize(),
-	}
-
-	parts := &aeadParts{
-		iv:         obj.iv,
-		ciphertext: obj.ciphertext,
-		tag:        obj.tag,
-	}
-
-	authData := obj.computeAuthData()
-
-	var plaintext []byte
-	for _, recipient := range obj.recipients {
-		recipientHeaders := obj.mergedHeaders(&recipient)
-
-		cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator)
-		if err == nil {
-			// Found a valid CEK -- let's try to decrypt.
-			plaintext, err = cipher.decrypt(cek, authData, parts)
-			if err == nil {
-				break
-			}
-		}
-	}
-
-	if plaintext == nil {
-		return nil, ErrCryptoFailure
-	}
-
-	// The "zip" header parameter may only be present in the protected header.
-	if obj.protected.Zip != "" {
-		plaintext, err = decompress(obj.protected.Zip, plaintext)
-	}
-
-	return plaintext, err
-}
diff --git a/vendor/gopkg.in/square/go-jose.v1/doc.go b/vendor/gopkg.in/square/go-jose.v1/doc.go
deleted file mode 100644
index b4cd1e98..00000000
--- a/vendor/gopkg.in/square/go-jose.v1/doc.go
+++ /dev/null
@@ -1,26 +0,0 @@
-/*-
- * Copyright 2014 Square Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
-
-Package jose aims to provide an implementation of the JavaScript Object Signing
-and Encryption set of standards. For the moment, it mainly focuses on
-encryption and signing based on the JSON Web Encryption and JSON Web Signature
-standards. The library supports both the compact and full serialization
-formats, and has optional support for multiple recipients.
-
-*/
-package jose // import "gopkg.in/square/go-jose.v1"
diff --git a/vendor/gopkg.in/square/go-jose.v1/encoding.go b/vendor/gopkg.in/square/go-jose.v1/encoding.go
deleted file mode 100644
index 3e2ac0ae..00000000
--- a/vendor/gopkg.in/square/go-jose.v1/encoding.go
+++ /dev/null
@@ -1,191 +0,0 @@
-/*-
- * Copyright 2014 Square Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "bytes" - "compress/flate" - "encoding/base64" - "encoding/binary" - "io" - "math/big" - "regexp" - "strings" -) - -var stripWhitespaceRegex = regexp.MustCompile("\\s") - -// Url-safe base64 encode that strips padding -func base64URLEncode(data []byte) string { - var result = base64.URLEncoding.EncodeToString(data) - return strings.TrimRight(result, "=") -} - -// Url-safe base64 decoder that adds padding -func base64URLDecode(data string) ([]byte, error) { - var missing = (4 - len(data)%4) % 4 - data += strings.Repeat("=", missing) - return base64.URLEncoding.DecodeString(data) -} - -// Helper function to serialize known-good objects. -// Precondition: value is not a nil pointer. -func mustSerializeJSON(value interface{}) []byte { - out, err := MarshalJSON(value) - if err != nil { - panic(err) - } - // We never want to serialize the top-level value "null," since it's not a - // valid JOSE message. But if a caller passes in a nil pointer to this method, - // MarshalJSON will happily serialize it as the top-level value "null". If - // that value is then embedded in another operation, for instance by being - // base64-encoded and fed as input to a signing algorithm - // (https://github.com/square/go-jose/issues/22), the result will be - // incorrect. Because this method is intended for known-good objects, and a nil - // pointer is not a known-good object, we are free to panic in this case. - // Note: It's not possible to directly check whether the data pointed at by an - // interface is a nil pointer, so we do this hacky workaround. - // https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I - if string(out) == "null" { - panic("Tried to serialize a nil pointer.") - } - return out -} - -// Strip all newlines and whitespace -func stripWhitespace(data string) string { - return stripWhitespaceRegex.ReplaceAllString(data, "") -} - -// Perform compression based on algorithm -func compress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { - switch algorithm { - case DEFLATE: - return deflate(input) - default: - return nil, ErrUnsupportedAlgorithm - } -} - -// Perform decompression based on algorithm -func decompress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { - switch algorithm { - case DEFLATE: - return inflate(input) - default: - return nil, ErrUnsupportedAlgorithm - } -} - -// Compress with DEFLATE -func deflate(input []byte) ([]byte, error) { - output := new(bytes.Buffer) - - // Writing to byte buffer, err is always nil - writer, _ := flate.NewWriter(output, 1) - _, _ = io.Copy(writer, bytes.NewBuffer(input)) - - err := writer.Close() - return output.Bytes(), err -} - -// Decompress with DEFLATE -func inflate(input []byte) ([]byte, error) { - output := new(bytes.Buffer) - reader := flate.NewReader(bytes.NewBuffer(input)) - - _, err := io.Copy(output, reader) - if err != nil { - return nil, err - } - - err = reader.Close() - return output.Bytes(), err -} - -// byteBuffer represents a slice of bytes that can be serialized to url-safe base64. 
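The base64URLEncode/base64URLDecode helpers earlier in this file hand-roll unpadded base64url by stripping and re-adding '=' characters. On Go 1.5 and later the standard library expresses the same behavior directly:

```go
s := base64.RawURLEncoding.EncodeToString(data) // encodes without '=' padding
b, err := base64.RawURLEncoding.DecodeString(s) // decodes without expecting padding
```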
-type byteBuffer struct { - data []byte -} - -func newBuffer(data []byte) *byteBuffer { - if data == nil { - return nil - } - return &byteBuffer{ - data: data, - } -} - -func newFixedSizeBuffer(data []byte, length int) *byteBuffer { - if len(data) > length { - panic("square/go-jose: invalid call to newFixedSizeBuffer (len(data) > length)") - } - pad := make([]byte, length-len(data)) - return newBuffer(append(pad, data...)) -} - -func newBufferFromInt(num uint64) *byteBuffer { - data := make([]byte, 8) - binary.BigEndian.PutUint64(data, num) - return newBuffer(bytes.TrimLeft(data, "\x00")) -} - -func (b *byteBuffer) MarshalJSON() ([]byte, error) { - return MarshalJSON(b.base64()) -} - -func (b *byteBuffer) UnmarshalJSON(data []byte) error { - var encoded string - err := UnmarshalJSON(data, &encoded) - if err != nil { - return err - } - - if encoded == "" { - return nil - } - - decoded, err := base64URLDecode(encoded) - if err != nil { - return err - } - - *b = *newBuffer(decoded) - - return nil -} - -func (b *byteBuffer) base64() string { - return base64URLEncode(b.data) -} - -func (b *byteBuffer) bytes() []byte { - // Handling nil here allows us to transparently handle nil slices when serializing. - if b == nil { - return nil - } - return b.data -} - -func (b byteBuffer) bigInt() *big.Int { - return new(big.Int).SetBytes(b.data) -} - -func (b byteBuffer) toInt() int { - return int(b.bigInt().Int64()) -} diff --git a/vendor/gopkg.in/square/go-jose.v1/json/LICENSE b/vendor/gopkg.in/square/go-jose.v1/json/LICENSE deleted file mode 100644 index 74487567..00000000 --- a/vendor/gopkg.in/square/go-jose.v1/json/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/gopkg.in/square/go-jose.v1/json/README.md b/vendor/gopkg.in/square/go-jose.v1/json/README.md deleted file mode 100644 index 86de5e55..00000000 --- a/vendor/gopkg.in/square/go-jose.v1/json/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# Safe JSON - -This repository contains a fork of the `encoding/json` package from Go 1.6. 
-
-The following changes were made:
-
-* Object deserialization uses case-sensitive member name matching instead of
-  [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html).
-  This is to avoid differences in the interpretation of JOSE messages between
-  go-jose and libraries written in other languages.
-* When deserializing a JSON object, we check for duplicate keys and reject the
-  input whenever we detect a duplicate. Rather than trying to work with malformed
-  data, we prefer to reject it right away.
diff --git a/vendor/gopkg.in/square/go-jose.v1/json/decode.go b/vendor/gopkg.in/square/go-jose.v1/json/decode.go
deleted file mode 100644
index 37457e5a..00000000
--- a/vendor/gopkg.in/square/go-jose.v1/json/decode.go
+++ /dev/null
@@ -1,1183 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Represents JSON data structure using native Go types: booleans, floats,
-// strings, arrays, and maps.
-
-package json
-
-import (
-	"bytes"
-	"encoding"
-	"encoding/base64"
-	"errors"
-	"fmt"
-	"reflect"
-	"runtime"
-	"strconv"
-	"unicode"
-	"unicode/utf16"
-	"unicode/utf8"
-)
-
-// Unmarshal parses the JSON-encoded data and stores the result
-// in the value pointed to by v.
-//
-// Unmarshal uses the inverse of the encodings that
-// Marshal uses, allocating maps, slices, and pointers as necessary,
-// with the following additional rules:
-//
-// To unmarshal JSON into a pointer, Unmarshal first handles the case of
-// the JSON being the JSON literal null. In that case, Unmarshal sets
-// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into
-// the value pointed at by the pointer. If the pointer is nil, Unmarshal
-// allocates a new value for it to point to.
-//
-// To unmarshal JSON into a struct, Unmarshal matches incoming object
-// keys to the keys used by Marshal (either the struct field name or its tag),
-// requiring an exact, case-sensitive match (this fork deliberately drops the
-// standard library's fallback to case-insensitive matching).
-// Unmarshal will only set exported fields of the struct.
-//
-// To unmarshal JSON into an interface value,
-// Unmarshal stores one of these in the interface value:
-//
-//	bool, for JSON booleans
-//	float64, for JSON numbers
-//	string, for JSON strings
-//	[]interface{}, for JSON arrays
-//	map[string]interface{}, for JSON objects
-//	nil for JSON null
-//
-// To unmarshal a JSON array into a slice, Unmarshal resets the slice length
-// to zero and then appends each element to the slice.
-// As a special case, to unmarshal an empty JSON array into a slice,
-// Unmarshal replaces the slice with a new empty slice.
-//
-// To unmarshal a JSON array into a Go array, Unmarshal decodes
-// JSON array elements into corresponding Go array elements.
-// If the Go array is smaller than the JSON array,
-// the additional JSON array elements are discarded.
-// If the JSON array is smaller than the Go array,
-// the additional Go array elements are set to zero values.
-//
-// To unmarshal a JSON object into a string-keyed map, Unmarshal first
-// establishes a map to use. If the map is nil, Unmarshal allocates a new map.
-// Otherwise Unmarshal reuses the existing map, keeping existing entries.
-// Unmarshal then stores key-value pairs from the JSON object into the map.
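The two behavioral changes this fork makes (see the README above) are easy to demonstrate against its Unmarshal; the duplicate-key error text comes from the object decoder later in this file:

```go
var v struct {
	Name string `json:"name"`
}

// Duplicate keys are a hard error rather than last-value-wins:
err := json.Unmarshal([]byte(`{"name":"a","name":"b"}`), &v)
// err: json: duplicate key 'name' in object

// Matching is case-sensitive: "NAME" does not match the "name" tag,
// so the key is skipped and v.Name stays "".
_ = json.Unmarshal([]byte(`{"NAME":"x"}`), &v)
```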
-// -// If a JSON value is not appropriate for a given target type, -// or if a JSON number overflows the target type, Unmarshal -// skips that field and completes the unmarshaling as best it can. -// If no more serious errors are encountered, Unmarshal returns -// an UnmarshalTypeError describing the earliest such error. -// -// The JSON null value unmarshals into an interface, map, pointer, or slice -// by setting that Go value to nil. Because null is often used in JSON to mean -// ``not present,'' unmarshaling a JSON null into any other Go type has no effect -// on the value and produces no error. -// -// When unmarshaling quoted strings, invalid UTF-8 or -// invalid UTF-16 surrogate pairs are not treated as an error. -// Instead, they are replaced by the Unicode replacement -// character U+FFFD. -// -func Unmarshal(data []byte, v interface{}) error { - // Check for well-formedness. - // Avoids filling out half a data structure - // before discovering a JSON syntax error. - var d decodeState - err := checkValid(data, &d.scan) - if err != nil { - return err - } - - d.init(data) - return d.unmarshal(v) -} - -// Unmarshaler is the interface implemented by objects -// that can unmarshal a JSON description of themselves. -// The input can be assumed to be a valid encoding of -// a JSON value. UnmarshalJSON must copy the JSON data -// if it wishes to retain the data after returning. -type Unmarshaler interface { - UnmarshalJSON([]byte) error -} - -// An UnmarshalTypeError describes a JSON value that was -// not appropriate for a value of a specific Go type. -type UnmarshalTypeError struct { - Value string // description of JSON value - "bool", "array", "number -5" - Type reflect.Type // type of Go value it could not be assigned to - Offset int64 // error occurred after reading Offset bytes -} - -func (e *UnmarshalTypeError) Error() string { - return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() -} - -// An UnmarshalFieldError describes a JSON object key that -// led to an unexported (and therefore unwritable) struct field. -// (No longer used; kept for compatibility.) -type UnmarshalFieldError struct { - Key string - Type reflect.Type - Field reflect.StructField -} - -func (e *UnmarshalFieldError) Error() string { - return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() -} - -// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. -// (The argument to Unmarshal must be a non-nil pointer.) -type InvalidUnmarshalError struct { - Type reflect.Type -} - -func (e *InvalidUnmarshalError) Error() string { - if e.Type == nil { - return "json: Unmarshal(nil)" - } - - if e.Type.Kind() != reflect.Ptr { - return "json: Unmarshal(non-pointer " + e.Type.String() + ")" - } - return "json: Unmarshal(nil " + e.Type.String() + ")" -} - -func (d *decodeState) unmarshal(v interface{}) (err error) { - defer func() { - if r := recover(); r != nil { - if _, ok := r.(runtime.Error); ok { - panic(r) - } - err = r.(error) - } - }() - - rv := reflect.ValueOf(v) - if rv.Kind() != reflect.Ptr || rv.IsNil() { - return &InvalidUnmarshalError{reflect.TypeOf(v)} - } - - d.scan.reset() - // We decode rv not rv.Elem because the Unmarshaler interface - // test must be applied at the top level of the value. - d.value(rv) - return d.savedError -} - -// A Number represents a JSON number literal. -type Number string - -// String returns the literal text of the number. 
-func (n Number) String() string { return string(n) } - -// Float64 returns the number as a float64. -func (n Number) Float64() (float64, error) { - return strconv.ParseFloat(string(n), 64) -} - -// Int64 returns the number as an int64. -func (n Number) Int64() (int64, error) { - return strconv.ParseInt(string(n), 10, 64) -} - -// isValidNumber reports whether s is a valid JSON number literal. -func isValidNumber(s string) bool { - // This function implements the JSON numbers grammar. - // See https://tools.ietf.org/html/rfc7159#section-6 - // and http://json.org/number.gif - - if s == "" { - return false - } - - // Optional - - if s[0] == '-' { - s = s[1:] - if s == "" { - return false - } - } - - // Digits - switch { - default: - return false - - case s[0] == '0': - s = s[1:] - - case '1' <= s[0] && s[0] <= '9': - s = s[1:] - for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:] - } - } - - // . followed by 1 or more digits. - if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { - s = s[2:] - for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:] - } - } - - // e or E followed by an optional - or + and - // 1 or more digits. - if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { - s = s[1:] - if s[0] == '+' || s[0] == '-' { - s = s[1:] - if s == "" { - return false - } - } - for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:] - } - } - - // Make sure we are at the end. - return s == "" -} - -// decodeState represents the state while decoding a JSON value. -type decodeState struct { - data []byte - off int // read offset in data - scan scanner - nextscan scanner // for calls to nextValue - savedError error - useNumber bool -} - -// errPhase is used for errors that should not happen unless -// there is a bug in the JSON decoder or something is editing -// the data slice while the decoder executes. -var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?") - -func (d *decodeState) init(data []byte) *decodeState { - d.data = data - d.off = 0 - d.savedError = nil - return d -} - -// error aborts the decoding by panicking with err. -func (d *decodeState) error(err error) { - panic(err) -} - -// saveError saves the first err it is called with, -// for reporting at the end of the unmarshal. -func (d *decodeState) saveError(err error) { - if d.savedError == nil { - d.savedError = err - } -} - -// next cuts off and returns the next full JSON value in d.data[d.off:]. -// The next value is known to be an object or array, not a literal. -func (d *decodeState) next() []byte { - c := d.data[d.off] - item, rest, err := nextValue(d.data[d.off:], &d.nextscan) - if err != nil { - d.error(err) - } - d.off = len(d.data) - len(rest) - - // Our scanner has seen the opening brace/bracket - // and thinks we're still in the middle of the object. - // invent a closing brace/bracket to get it out. - if c == '{' { - d.scan.step(&d.scan, '}') - } else { - d.scan.step(&d.scan, ']') - } - - return item -} - -// scanWhile processes bytes in d.data[d.off:] until it -// receives a scan code not equal to op. -// It updates d.off and returns the new scan code. -func (d *decodeState) scanWhile(op int) int { - var newOp int - for { - if d.off >= len(d.data) { - newOp = d.scan.eof() - d.off = len(d.data) + 1 // mark processed EOF with len+1 - } else { - c := d.data[d.off] - d.off++ - newOp = d.scan.step(&d.scan, c) - } - if newOp != op { - break - } - } - return newOp -} - -// value decodes a JSON value from d.data[d.off:] into the value. 
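Number exists so callers can defer numeric interpretation instead of being forced through float64 and its 53-bit integer precision. For example:

```go
n := Number("9007199254740993") // 2^53 + 1, not representable in float64
i, _ := n.Int64()               // 9007199254740993, exact
f, _ := n.Float64()             // 9.007199254740992e+15, silently rounded
_, _ = i, f
```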
-// it updates d.off to point past the decoded value. -func (d *decodeState) value(v reflect.Value) { - if !v.IsValid() { - _, rest, err := nextValue(d.data[d.off:], &d.nextscan) - if err != nil { - d.error(err) - } - d.off = len(d.data) - len(rest) - - // d.scan thinks we're still at the beginning of the item. - // Feed in an empty string - the shortest, simplest value - - // so that it knows we got to the end of the value. - if d.scan.redo { - // rewind. - d.scan.redo = false - d.scan.step = stateBeginValue - } - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '"') - - n := len(d.scan.parseState) - if n > 0 && d.scan.parseState[n-1] == parseObjectKey { - // d.scan thinks we just read an object key; finish the object - d.scan.step(&d.scan, ':') - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '}') - } - - return - } - - switch op := d.scanWhile(scanSkipSpace); op { - default: - d.error(errPhase) - - case scanBeginArray: - d.array(v) - - case scanBeginObject: - d.object(v) - - case scanBeginLiteral: - d.literal(v) - } -} - -type unquotedValue struct{} - -// valueQuoted is like value but decodes a -// quoted string literal or literal null into an interface value. -// If it finds anything other than a quoted string literal or null, -// valueQuoted returns unquotedValue{}. -func (d *decodeState) valueQuoted() interface{} { - switch op := d.scanWhile(scanSkipSpace); op { - default: - d.error(errPhase) - - case scanBeginArray: - d.array(reflect.Value{}) - - case scanBeginObject: - d.object(reflect.Value{}) - - case scanBeginLiteral: - switch v := d.literalInterface().(type) { - case nil, string: - return v - } - } - return unquotedValue{} -} - -// indirect walks down v allocating pointers as needed, -// until it gets to a non-pointer. -// if it encounters an Unmarshaler, indirect stops and returns that. -// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. -func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { - // If v is a named type and is addressable, - // start with its address, so that if the type has pointer methods, - // we find them. - if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { - v = v.Addr() - } - for { - // Load value from interface, but only if the result will be - // usefully addressable. - if v.Kind() == reflect.Interface && !v.IsNil() { - e := v.Elem() - if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { - v = e - continue - } - } - - if v.Kind() != reflect.Ptr { - break - } - - if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { - break - } - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - if v.Type().NumMethod() > 0 { - if u, ok := v.Interface().(Unmarshaler); ok { - return u, nil, reflect.Value{} - } - if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { - return nil, u, reflect.Value{} - } - } - v = v.Elem() - } - return nil, nil, v -} - -// array consumes an array from d.data[d.off-1:], decoding into the value v. -// the first byte of the array ('[') has been read already. -func (d *decodeState) array(v reflect.Value) { - // Check for unmarshaler. 
- u, ut, pv := d.indirect(v, false) - if u != nil { - d.off-- - err := u.UnmarshalJSON(d.next()) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) - d.off-- - d.next() - return - } - - v = pv - - // Check type of target. - switch v.Kind() { - case reflect.Interface: - if v.NumMethod() == 0 { - // Decoding into nil interface? Switch to non-reflect code. - v.Set(reflect.ValueOf(d.arrayInterface())) - return - } - // Otherwise it's invalid. - fallthrough - default: - d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) - d.off-- - d.next() - return - case reflect.Array: - case reflect.Slice: - break - } - - i := 0 - for { - // Look ahead for ] - can only happen on first iteration. - op := d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - - // Back up so d.value can have the byte we just read. - d.off-- - d.scan.undo(op) - - // Get element of array, growing if necessary. - if v.Kind() == reflect.Slice { - // Grow slice if necessary - if i >= v.Cap() { - newcap := v.Cap() + v.Cap()/2 - if newcap < 4 { - newcap = 4 - } - newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) - reflect.Copy(newv, v) - v.Set(newv) - } - if i >= v.Len() { - v.SetLen(i + 1) - } - } - - if i < v.Len() { - // Decode into element. - d.value(v.Index(i)) - } else { - // Ran out of fixed array: skip. - d.value(reflect.Value{}) - } - i++ - - // Next token must be , or ]. - op = d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - if op != scanArrayValue { - d.error(errPhase) - } - } - - if i < v.Len() { - if v.Kind() == reflect.Array { - // Array. Zero the rest. - z := reflect.Zero(v.Type().Elem()) - for ; i < v.Len(); i++ { - v.Index(i).Set(z) - } - } else { - v.SetLen(i) - } - } - if i == 0 && v.Kind() == reflect.Slice { - v.Set(reflect.MakeSlice(v.Type(), 0, 0)) - } -} - -var nullLiteral = []byte("null") - -// object consumes an object from d.data[d.off-1:], decoding into the value v. -// the first byte ('{') of the object has been read already. -func (d *decodeState) object(v reflect.Value) { - // Check for unmarshaler. - u, ut, pv := d.indirect(v, false) - if u != nil { - d.off-- - err := u.UnmarshalJSON(d.next()) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - v = pv - - // Decoding into nil interface? Switch to non-reflect code. - if v.Kind() == reflect.Interface && v.NumMethod() == 0 { - v.Set(reflect.ValueOf(d.objectInterface())) - return - } - - // Check type of target: struct or map[string]T - switch v.Kind() { - case reflect.Map: - // map must have string kind - t := v.Type() - if t.Key().Kind() != reflect.String { - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - if v.IsNil() { - v.Set(reflect.MakeMap(t)) - } - case reflect.Struct: - - default: - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - - var mapElem reflect.Value - keys := map[string]bool{} - - for { - // Read opening " of string key or closing }. - op := d.scanWhile(scanSkipSpace) - if op == scanEndObject { - // closing } - can only happen on first iteration. - break - } - if op != scanBeginLiteral { - d.error(errPhase) - } - - // Read key. 
- start := d.off - 1 - op = d.scanWhile(scanContinue) - item := d.data[start : d.off-1] - key, ok := unquote(item) - if !ok { - d.error(errPhase) - } - - // Check for duplicate keys. - _, ok = keys[key] - if !ok { - keys[key] = true - } else { - d.error(fmt.Errorf("json: duplicate key '%s' in object", key)) - } - - // Figure out field corresponding to key. - var subv reflect.Value - destring := false // whether the value is wrapped in a string to be decoded first - - if v.Kind() == reflect.Map { - elemType := v.Type().Elem() - if !mapElem.IsValid() { - mapElem = reflect.New(elemType).Elem() - } else { - mapElem.Set(reflect.Zero(elemType)) - } - subv = mapElem - } else { - var f *field - fields := cachedTypeFields(v.Type()) - for i := range fields { - ff := &fields[i] - if bytes.Equal(ff.nameBytes, []byte(key)) { - f = ff - break - } - } - if f != nil { - subv = v - destring = f.quoted - for _, i := range f.index { - if subv.Kind() == reflect.Ptr { - if subv.IsNil() { - subv.Set(reflect.New(subv.Type().Elem())) - } - subv = subv.Elem() - } - subv = subv.Field(i) - } - } - } - - // Read : before value. - if op == scanSkipSpace { - op = d.scanWhile(scanSkipSpace) - } - if op != scanObjectKey { - d.error(errPhase) - } - - // Read value. - if destring { - switch qv := d.valueQuoted().(type) { - case nil: - d.literalStore(nullLiteral, subv, false) - case string: - d.literalStore([]byte(qv), subv, true) - default: - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) - } - } else { - d.value(subv) - } - - // Write value back to map; - // if using struct, subv points into struct already. - if v.Kind() == reflect.Map { - kv := reflect.ValueOf(key).Convert(v.Type().Key()) - v.SetMapIndex(kv, subv) - } - - // Next token must be , or }. - op = d.scanWhile(scanSkipSpace) - if op == scanEndObject { - break - } - if op != scanObjectValue { - d.error(errPhase) - } - } -} - -// literal consumes a literal from d.data[d.off-1:], decoding into the value v. -// The first byte of the literal has been read already -// (that's how the caller knows it's a literal). -func (d *decodeState) literal(v reflect.Value) { - // All bytes inside literal return scanContinue op code. - start := d.off - 1 - op := d.scanWhile(scanContinue) - - // Scan read one byte too far; back up. - d.off-- - d.scan.undo(op) - - d.literalStore(d.data[start:d.off], v, false) -} - -// convertNumber converts the number literal s to a float64 or a Number -// depending on the setting of d.useNumber. -func (d *decodeState) convertNumber(s string) (interface{}, error) { - if d.useNumber { - return Number(s), nil - } - f, err := strconv.ParseFloat(s, 64) - if err != nil { - return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)} - } - return f, nil -} - -var numberType = reflect.TypeOf(Number("")) - -// literalStore decodes a literal stored in item into v. -// -// fromQuoted indicates whether this literal came from unwrapping a -// string from the ",string" struct tag option. this is used only to -// produce more helpful error messages. -func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) { - // Check for unmarshaler. 
- if len(item) == 0 { - //Empty string given - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - return - } - wantptr := item[0] == 'n' // null - u, ut, pv := d.indirect(v, wantptr) - if u != nil { - err := u.UnmarshalJSON(item) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - if item[0] != '"' { - if fromQuoted { - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - } - return - } - s, ok := unquoteBytes(item) - if !ok { - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(errPhase) - } - } - err := ut.UnmarshalText(s) - if err != nil { - d.error(err) - } - return - } - - v = pv - - switch c := item[0]; c { - case 'n': // null - switch v.Kind() { - case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: - v.Set(reflect.Zero(v.Type())) - // otherwise, ignore null for primitives/string - } - case 't', 'f': // true, false - value := c == 't' - switch v.Kind() { - default: - if fromQuoted { - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) - } - case reflect.Bool: - v.SetBool(value) - case reflect.Interface: - if v.NumMethod() == 0 { - v.Set(reflect.ValueOf(value)) - } else { - d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) - } - } - - case '"': // string - s, ok := unquoteBytes(item) - if !ok { - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(errPhase) - } - } - switch v.Kind() { - default: - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - case reflect.Slice: - if v.Type().Elem().Kind() != reflect.Uint8 { - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - break - } - b := make([]byte, base64.StdEncoding.DecodedLen(len(s))) - n, err := base64.StdEncoding.Decode(b, s) - if err != nil { - d.saveError(err) - break - } - v.SetBytes(b[:n]) - case reflect.String: - v.SetString(string(s)) - case reflect.Interface: - if v.NumMethod() == 0 { - v.Set(reflect.ValueOf(string(s))) - } else { - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - } - } - - default: // number - if c != '-' && (c < '0' || c > '9') { - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(errPhase) - } - } - s := string(item) - switch v.Kind() { - default: - if v.Kind() == reflect.String && v.Type() == numberType { - v.SetString(s) - if !isValidNumber(s) { - d.error(fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item)) - } - break - } - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) - } - case reflect.Interface: - n, err := d.convertNumber(s) - if err != nil { - d.saveError(err) - break - } - if v.NumMethod() != 0 { - d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) - break - } - v.Set(reflect.ValueOf(n)) - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, 
reflect.Int64: - n, err := strconv.ParseInt(s, 10, 64) - if err != nil || v.OverflowInt(n) { - d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) - break - } - v.SetInt(n) - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - n, err := strconv.ParseUint(s, 10, 64) - if err != nil || v.OverflowUint(n) { - d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) - break - } - v.SetUint(n) - - case reflect.Float32, reflect.Float64: - n, err := strconv.ParseFloat(s, v.Type().Bits()) - if err != nil || v.OverflowFloat(n) { - d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) - break - } - v.SetFloat(n) - } - } -} - -// The xxxInterface routines build up a value to be stored -// in an empty interface. They are not strictly necessary, -// but they avoid the weight of reflection in this common case. - -// valueInterface is like value but returns interface{} -func (d *decodeState) valueInterface() interface{} { - switch d.scanWhile(scanSkipSpace) { - default: - d.error(errPhase) - panic("unreachable") - case scanBeginArray: - return d.arrayInterface() - case scanBeginObject: - return d.objectInterface() - case scanBeginLiteral: - return d.literalInterface() - } -} - -// arrayInterface is like array but returns []interface{}. -func (d *decodeState) arrayInterface() []interface{} { - var v = make([]interface{}, 0) - for { - // Look ahead for ] - can only happen on first iteration. - op := d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - - // Back up so d.value can have the byte we just read. - d.off-- - d.scan.undo(op) - - v = append(v, d.valueInterface()) - - // Next token must be , or ]. - op = d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - if op != scanArrayValue { - d.error(errPhase) - } - } - return v -} - -// objectInterface is like object but returns map[string]interface{}. -func (d *decodeState) objectInterface() map[string]interface{} { - m := make(map[string]interface{}) - keys := map[string]bool{} - - for { - // Read opening " of string key or closing }. - op := d.scanWhile(scanSkipSpace) - if op == scanEndObject { - // closing } - can only happen on first iteration. - break - } - if op != scanBeginLiteral { - d.error(errPhase) - } - - // Read string key. - start := d.off - 1 - op = d.scanWhile(scanContinue) - item := d.data[start : d.off-1] - key, ok := unquote(item) - if !ok { - d.error(errPhase) - } - - // Check for duplicate keys. - _, ok = keys[key] - if !ok { - keys[key] = true - } else { - d.error(fmt.Errorf("json: duplicate key '%s' in object", key)) - } - - // Read : before value. - if op == scanSkipSpace { - op = d.scanWhile(scanSkipSpace) - } - if op != scanObjectKey { - d.error(errPhase) - } - - // Read value. - m[key] = d.valueInterface() - - // Next token must be , or }. - op = d.scanWhile(scanSkipSpace) - if op == scanEndObject { - break - } - if op != scanObjectValue { - d.error(errPhase) - } - } - return m -} - -// literalInterface is like literal but returns an interface value. -func (d *decodeState) literalInterface() interface{} { - // All bytes inside literal return scanContinue op code. - start := d.off - 1 - op := d.scanWhile(scanContinue) - - // Scan read one byte too far; back up. 
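The valueInterface/objectInterface/arrayInterface routines above, together with convertNumber, define what an untyped decode produces. The standard library mirrors this behavior, so a short sketch against it shows both the default float64 numbers and the json.Number path that d.useNumber switches on:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	data := []byte(`{"n": 1e2, "tags": ["a", "b"]}`)

	// Decoding into an empty interface takes the valueInterface path:
	// objects become map[string]interface{}, arrays []interface{},
	// and numbers float64 by default (see convertNumber above).
	var v interface{}
	if err := json.Unmarshal(data, &v); err != nil {
		panic(err)
	}
	fmt.Printf("%#v\n", v)

	// With UseNumber set, convertNumber returns a json.Number instead,
	// preserving the literal text "1e2".
	dec := json.NewDecoder(bytes.NewReader(data))
	dec.UseNumber()
	var vn interface{}
	if err := dec.Decode(&vn); err != nil {
		panic(err)
	}
	fmt.Printf("%#v\n", vn)
}
```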
- d.off-- - d.scan.undo(op) - item := d.data[start:d.off] - - switch c := item[0]; c { - case 'n': // null - return nil - - case 't', 'f': // true, false - return c == 't' - - case '"': // string - s, ok := unquote(item) - if !ok { - d.error(errPhase) - } - return s - - default: // number - if c != '-' && (c < '0' || c > '9') { - d.error(errPhase) - } - n, err := d.convertNumber(string(item)) - if err != nil { - d.saveError(err) - } - return n - } -} - -// getu4 decodes \uXXXX from the beginning of s, returning the hex value, -// or it returns -1. -func getu4(s []byte) rune { - if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { - return -1 - } - r, err := strconv.ParseUint(string(s[2:6]), 16, 64) - if err != nil { - return -1 - } - return rune(r) -} - -// unquote converts a quoted JSON string literal s into an actual string t. -// The rules are different than for Go, so cannot use strconv.Unquote. -func unquote(s []byte) (t string, ok bool) { - s, ok = unquoteBytes(s) - t = string(s) - return -} - -func unquoteBytes(s []byte) (t []byte, ok bool) { - if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { - return - } - s = s[1 : len(s)-1] - - // Check for unusual characters. If there are none, - // then no unquoting is needed, so return a slice of the - // original bytes. - r := 0 - for r < len(s) { - c := s[r] - if c == '\\' || c == '"' || c < ' ' { - break - } - if c < utf8.RuneSelf { - r++ - continue - } - rr, size := utf8.DecodeRune(s[r:]) - if rr == utf8.RuneError && size == 1 { - break - } - r += size - } - if r == len(s) { - return s, true - } - - b := make([]byte, len(s)+2*utf8.UTFMax) - w := copy(b, s[0:r]) - for r < len(s) { - // Out of room? Can only happen if s is full of - // malformed UTF-8 and we're replacing each - // byte with RuneError. - if w >= len(b)-2*utf8.UTFMax { - nb := make([]byte, (len(b)+utf8.UTFMax)*2) - copy(nb, b[0:w]) - b = nb - } - switch c := s[r]; { - case c == '\\': - r++ - if r >= len(s) { - return - } - switch s[r] { - default: - return - case '"', '\\', '/', '\'': - b[w] = s[r] - r++ - w++ - case 'b': - b[w] = '\b' - r++ - w++ - case 'f': - b[w] = '\f' - r++ - w++ - case 'n': - b[w] = '\n' - r++ - w++ - case 'r': - b[w] = '\r' - r++ - w++ - case 't': - b[w] = '\t' - r++ - w++ - case 'u': - r-- - rr := getu4(s[r:]) - if rr < 0 { - return - } - r += 6 - if utf16.IsSurrogate(rr) { - rr1 := getu4(s[r:]) - if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { - // A valid pair; consume. - r += 6 - w += utf8.EncodeRune(b[w:], dec) - break - } - // Invalid surrogate; fall back to replacement rune. - rr = unicode.ReplacementChar - } - w += utf8.EncodeRune(b[w:], rr) - } - - // Quote, control characters are invalid. - case c == '"', c < ' ': - return - - // ASCII - case c < utf8.RuneSelf: - b[w] = c - r++ - w++ - - // Coerce to well-formed UTF-8. - default: - rr, size := utf8.DecodeRune(s[r:]) - r += size - w += utf8.EncodeRune(b[w:], rr) - } - } - return b[0:w], true -} diff --git a/vendor/gopkg.in/square/go-jose.v1/json/encode.go b/vendor/gopkg.in/square/go-jose.v1/json/encode.go deleted file mode 100644 index 1dae8bb7..00000000 --- a/vendor/gopkg.in/square/go-jose.v1/json/encode.go +++ /dev/null @@ -1,1197 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package json implements encoding and decoding of JSON objects as defined in -// RFC 4627. 
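Looking back at the unquoteBytes routine that closes the deleted decode.go: its surrogate-pair branch (getu4 followed by utf16.DecodeRune) is easiest to see with a concrete escape sequence. Any conforming decoder behaves the same way, so a small sketch with the standard library:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// `\ud83d\ude00` is a UTF-16 surrogate pair: unquoteBytes decodes
	// each \uXXXX escape with getu4, combines the pair with
	// utf16.DecodeRune, and re-encodes the result as UTF-8.
	var s string
	if err := json.Unmarshal([]byte(`"\ud83d\ude00"`), &s); err != nil {
		panic(err)
	}
	fmt.Printf("%q (%d bytes)\n", s, len(s)) // "😀" (4 bytes)
}
```

The two six-byte escapes collapse into the single rune U+1F600, emitted as four bytes of UTF-8; an unpaired surrogate would instead fall back to the replacement character, as the code above shows.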
The mapping between JSON objects and Go values is described -// in the documentation for the Marshal and Unmarshal functions. -// -// See "JSON and Go" for an introduction to this package: -// https://golang.org/doc/articles/json_and_go.html -package json - -import ( - "bytes" - "encoding" - "encoding/base64" - "fmt" - "math" - "reflect" - "runtime" - "sort" - "strconv" - "strings" - "sync" - "unicode" - "unicode/utf8" -) - -// Marshal returns the JSON encoding of v. -// -// Marshal traverses the value v recursively. -// If an encountered value implements the Marshaler interface -// and is not a nil pointer, Marshal calls its MarshalJSON method -// to produce JSON. If no MarshalJSON method is present but the -// value implements encoding.TextMarshaler instead, Marshal calls -// its MarshalText method. -// The nil pointer exception is not strictly necessary -// but mimics a similar, necessary exception in the behavior of -// UnmarshalJSON. -// -// Otherwise, Marshal uses the following type-dependent default encodings: -// -// Boolean values encode as JSON booleans. -// -// Floating point, integer, and Number values encode as JSON numbers. -// -// String values encode as JSON strings coerced to valid UTF-8, -// replacing invalid bytes with the Unicode replacement rune. -// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e" -// to keep some browsers from misinterpreting JSON output as HTML. -// Ampersand "&" is also escaped to "\u0026" for the same reason. -// -// Array and slice values encode as JSON arrays, except that -// []byte encodes as a base64-encoded string, and a nil slice -// encodes as the null JSON object. -// -// Struct values encode as JSON objects. Each exported struct field -// becomes a member of the object unless -// - the field's tag is "-", or -// - the field is empty and its tag specifies the "omitempty" option. -// The empty values are false, 0, any -// nil pointer or interface value, and any array, slice, map, or string of -// length zero. The object's default key string is the struct field name -// but can be specified in the struct field's tag value. The "json" key in -// the struct field's tag value is the key name, followed by an optional comma -// and options. Examples: -// -// // Field is ignored by this package. -// Field int `json:"-"` -// -// // Field appears in JSON as key "myName". -// Field int `json:"myName"` -// -// // Field appears in JSON as key "myName" and -// // the field is omitted from the object if its value is empty, -// // as defined above. -// Field int `json:"myName,omitempty"` -// -// // Field appears in JSON as key "Field" (the default), but -// // the field is skipped if empty. -// // Note the leading comma. -// Field int `json:",omitempty"` -// -// The "string" option signals that a field is stored as JSON inside a -// JSON-encoded string. It applies only to fields of string, floating point, -// integer, or boolean types. This extra level of encoding is sometimes used -// when communicating with JavaScript programs: -// -// Int64String int64 `json:",string"` -// -// The key name will be used if it's a non-empty string consisting of -// only Unicode letters, digits, dollar signs, percent signs, hyphens, -// underscores and slashes. -// -// Anonymous struct fields are usually marshaled as if their inner exported fields -// were fields in the outer struct, subject to the usual Go visibility rules amended -// as described in the next paragraph. 
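The tag options spelled out above are easiest to confirm with a quick round-trip; a short sketch (the payload type and its field names are arbitrary):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type payload struct {
	Secret  string `json:"-"`              // never emitted
	Name    string `json:"myName"`         // renamed key
	Count   int    `json:",omitempty"`     // dropped when zero
	Balance int64  `json:"balance,string"` // encoded inside a JSON string
}

func main() {
	b, _ := json.Marshal(payload{Secret: "x", Name: "a", Balance: 42})
	fmt.Println(string(b)) // {"myName":"a","balance":"42"}
}
```

Secret is dropped by the "-" tag, the zero-valued Count by omitempty, and the ,string option wraps Balance's digits in a JSON string, matching the ",string" handling seen earlier in literalStore on the decode side.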
-// An anonymous struct field with a name given in its JSON tag is treated as -// having that name, rather than being anonymous. -// An anonymous struct field of interface type is treated the same as having -// that type as its name, rather than being anonymous. -// -// The Go visibility rules for struct fields are amended for JSON when -// deciding which field to marshal or unmarshal. If there are -// multiple fields at the same level, and that level is the least -// nested (and would therefore be the nesting level selected by the -// usual Go rules), the following extra rules apply: -// -// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered, -// even if there are multiple untagged fields that would otherwise conflict. -// 2) If there is exactly one field (tagged or not according to the first rule), that is selected. -// 3) Otherwise there are multiple fields, and all are ignored; no error occurs. -// -// Handling of anonymous struct fields is new in Go 1.1. -// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of -// an anonymous struct field in both current and earlier versions, give the field -// a JSON tag of "-". -// -// Map values encode as JSON objects. -// The map's key type must be string; the map keys are used as JSON object -// keys, subject to the UTF-8 coercion described for string values above. -// -// Pointer values encode as the value pointed to. -// A nil pointer encodes as the null JSON object. -// -// Interface values encode as the value contained in the interface. -// A nil interface value encodes as the null JSON object. -// -// Channel, complex, and function values cannot be encoded in JSON. -// Attempting to encode such a value causes Marshal to return -// an UnsupportedTypeError. -// -// JSON cannot represent cyclic data structures and Marshal does not -// handle them. Passing cyclic structures to Marshal will result in -// an infinite recursion. -// -func Marshal(v interface{}) ([]byte, error) { - e := &encodeState{} - err := e.marshal(v) - if err != nil { - return nil, err - } - return e.Bytes(), nil -} - -// MarshalIndent is like Marshal but applies Indent to format the output. -func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { - b, err := Marshal(v) - if err != nil { - return nil, err - } - var buf bytes.Buffer - err = Indent(&buf, b, prefix, indent) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029 -// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029 -// so that the JSON will be safe to embed inside HTML